From b5a60245d9c19d64a053eb650f1b584299be804a Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 13 Jan 2016 14:25:44 -0500 Subject: [PATCH 01/78] Removed S3 datasink stuff --- nipype/interfaces/io.py | 749 +++++++---------------------- nipype/interfaces/tests/test_io.py | 221 ++------- 2 files changed, 224 insertions(+), 746 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 86359756f6..5909843c34 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -17,6 +17,11 @@ >>> os.chdir(datadir) """ + +from builtins import zip +from builtins import filter +from builtins import range + import glob import fnmatch import string @@ -29,11 +34,19 @@ from warnings import warn import sqlite3 -from nipype.utils.misc import human_order_sorted -from nipype.external import six -from ..utils.misc import str2bool +from .base import (TraitedSpec, traits, File, Directory, + BaseInterface, InputMultiPath, isdefined, + OutputMultiPath, DynamicTraitedSpec, + Undefined, BaseInterfaceInputSpec) from .. import config +from ..external.six import string_types +from ..utils.filemanip import (copyfile, list_to_filename, + filename_to_list) +from ..utils.misc import human_order_sorted +from ..utils.misc import str2bool +from .. import logging +iflogger = logging.getLogger('interface') try: import pyxnat @@ -51,16 +64,6 @@ except: pass -from nipype.interfaces.base import (TraitedSpec, traits, File, Directory, - BaseInterface, InputMultiPath, isdefined, - OutputMultiPath, DynamicTraitedSpec, - Undefined, BaseInterfaceInputSpec) -from nipype.utils.filemanip import (copyfile, list_to_filename, - filename_to_list) - -from .. import logging -iflogger = logging.getLogger('interface') - def copytree(src, dst, use_hardlink=False): """Recursively copy a directory tree using @@ -73,7 +76,7 @@ def copytree(src, dst, use_hardlink=False): names = os.listdir(src) try: os.makedirs(dst) - except OSError, why: + except OSError as why: if 'File exists' in why: pass else: @@ -88,11 +91,11 @@ def copytree(src, dst, use_hardlink=False): else: copyfile(srcname, dstname, True, hashmethod='content', use_hardlink=use_hardlink) - except (IOError, os.error), why: + except (IOError, os.error) as why: errors.append((srcname, dstname, str(why))) # catch the Error from the recursive copytree so that we can # continue with other files - except Exception, err: + except Exception as err: errors.extend(err.args[0]) if errors: raise Exception(errors) @@ -131,54 +134,7 @@ def _add_output_traits(self, base): return base -# Class to track percentage of S3 file upload -class ProgressPercentage(object): - ''' - Callable class instsance (via __call__ method) that displays - upload percentage of a file to S3 - ''' - - def __init__(self, filename): - ''' - ''' - - # Import packages - import threading - - # Initialize data attributes - self._filename = filename - self._size = float(os.path.getsize(filename)) - self._seen_so_far = 0 - self._lock = threading.Lock() - - def __call__(self, bytes_amount): - ''' - ''' - - # Import packages - import sys - - # With the lock on, print upload status - with self._lock: - self._seen_so_far += bytes_amount - if self._size != 0: - percentage = (self._seen_so_far / self._size) * 100 - else: - percentage = 0 - progress_str = '%d / %d (%.2f%%)\r'\ - % (self._seen_so_far, self._size, percentage) - - # Write to stdout - sys.stdout.write(progress_str) - sys.stdout.flush() - - -# DataSink inputs class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - ''' - ''' - - # Init inputspec data attributes base_directory = Directory( desc='Path to the base directory for storing data.') container = traits.Str( @@ -190,30 +146,17 @@ class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): desc=('List of 2-tuples reflecting string ' 'to substitute and string to replace ' 'it with')) - regexp_substitutions = \ - InputMultiPath(traits.Tuple(traits.Str, traits.Str), - desc=('List of 2-tuples reflecting a pair of a '\ - 'Python regexp pattern and a replacement '\ - 'string. Invoked after string `substitutions`')) + regexp_substitutions = InputMultiPath(traits.Tuple(traits.Str, traits.Str), + desc=('List of 2-tuples reflecting a pair ' + 'of a Python regexp pattern and a ' + 'replacement string. Invoked after ' + 'string `substitutions`')) _outputs = traits.Dict(traits.Str, value={}, usedefault=True) remove_dest_dir = traits.Bool(False, usedefault=True, desc='remove dest directory when copying dirs') - # AWS S3 data attributes - creds_path = traits.Str(desc='Filepath to AWS credentials file for S3 bucket '\ - 'access') - encrypt_bucket_keys = traits.Bool(desc='Flag indicating whether to use S3 '\ - 'server-side AES-256 encryption') - # Set this if user wishes to override the bucket with their own - bucket = traits.Generic(mandatory=False, - desc='Boto3 S3 bucket for manual override of bucket') - # Set this if user wishes to have local copy of files as well - local_copy = traits.Str(desc='Copy files locally as well as to S3 bucket') - - # Set call-able inputs attributes def __setattr__(self, key, value): - if key not in self.copyable_trait_names(): if not isdefined(value): super(DataSinkInputSpec, self).__setattr__(key, value) @@ -224,19 +167,11 @@ def __setattr__(self, key, value): super(DataSinkInputSpec, self).__setattr__(key, value) -# DataSink outputs class DataSinkOutputSpec(TraitedSpec): - ''' - ''' - # Import packages - import traits.api as tapi + out_file = traits.Any(desc='datasink output') - # Init out file - out_file = tapi.Any(desc='datasink output') - -# Custom DataSink class class DataSink(IOBase): """ Generic datasink module to store structured outputs @@ -284,7 +219,7 @@ class DataSink(IOBase): >>> ds.inputs.structural = 'structural.nii' >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii']) >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii']) - >>> ds.run() # doctest: +SKIP + >>> ds.run() # doctest: +SKIP To use DataSink in a MapNode, its inputs have to be defined at the time the interface is created. @@ -295,15 +230,12 @@ class DataSink(IOBase): >>> ds.inputs.structural = 'structural.nii' >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii']) >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii']) - >>> ds.run() # doctest: +SKIP + >>> ds.run() # doctest: +SKIP """ - - # Give obj .inputs and .outputs input_spec = DataSinkInputSpec output_spec = DataSinkOutputSpec - # Initialization method to set up datasink def __init__(self, infields=None, force_run=True, **kwargs): """ Parameters @@ -325,7 +257,6 @@ def __init__(self, infields=None, force_run=True, **kwargs): if force_run: self._always_run = True - # Get destination paths def _get_dst(self, src): # If path is directory with trailing os.path.sep, # then remove that for a more robust behavior @@ -349,7 +280,6 @@ def _get_dst(self, src): dst = dst[1:] return dst - # Substitute paths in substitutions dictionary parameter def _substitute(self, pathstr): pathstr_ = pathstr if isdefined(self.inputs.substitutions): @@ -370,395 +300,81 @@ def _substitute(self, pathstr): iflogger.info('sub: %s -> %s' % (pathstr_, pathstr)) return pathstr - # Check for s3 in base directory - def _check_s3_base_dir(self): - ''' - Method to see if the datasink's base directory specifies an - S3 bucket path; if it does, it parses the path for the bucket - name in the form 's3://bucket_name/...' and adds a bucket - attribute to the data sink instance, i.e. self.bucket - - Parameters - ---------- - - Returns - ------- - s3_flag : boolean - flag indicating whether the base_directory contained an - S3 bucket path - ''' - - # Init variables - s3_str = 's3://' - base_directory = self.inputs.base_directory - - if not isdefined(base_directory): - s3_flag = False - return s3_flag - - # Explicitly lower-case the "s3" - if base_directory.lower().startswith(s3_str): - base_dir_sp = base_directory.split('/') - base_dir_sp[0] = base_dir_sp[0].lower() - base_directory = '/'.join(base_dir_sp) - - # Check if 's3://' in base dir - if base_directory.startswith(s3_str): - # Attempt to access bucket - try: - # Expects bucket name to be 's3://bucket_name/base_dir/..' - bucket_name = base_directory.split(s3_str)[1].split('/')[0] - # Get the actual bucket object - if self.inputs.bucket: - self.bucket = self.inputs.bucket - else: - self.bucket = self._fetch_bucket(bucket_name) - # Report error in case of exception - except Exception as exc: - err_msg = 'Unable to access S3 bucket. Error:\n%s. Exiting...'\ - % exc - raise Exception(err_msg) - # Bucket access was a success, set flag - s3_flag = True - # Otherwise it's just a normal datasink - else: - s3_flag = False - - # Return s3_flag - return s3_flag - - # Function to return AWS secure environment variables - def _return_aws_keys(self, creds_path): - ''' - Method to return AWS access key id and secret access key using - credentials found in a local file. - - Parameters - ---------- - creds_path : string (filepath) - path to the csv file downloaded from AWS; can either be root - or user credentials - - Returns - ------- - aws_access_key_id : string - string of the AWS access key ID - aws_secret_access_key : string - string of the AWS secret access key - ''' - - # Init variables - with open(creds_path, 'r') as creds_in: - # Grab csv rows - row1 = creds_in.readline() - row2 = creds_in.readline() - - # Are they root or user keys - if 'User Name' in row1: - # And split out for keys - aws_access_key_id = row2.split(',')[1] - aws_secret_access_key = row2.split(',')[2] - elif 'AWSAccessKeyId' in row1: - # And split out for keys - aws_access_key_id = row1.split('=')[1] - aws_secret_access_key = row2.split('=')[1] - else: - err_msg = 'Credentials file not recognized, check file is correct' - raise Exception(err_msg) - - # Strip any carriage return/line feeds - aws_access_key_id = aws_access_key_id.replace('\r', '').replace('\n', '') - aws_secret_access_key = aws_secret_access_key.replace('\r', '').replace('\n', '') - - # Return keys - return aws_access_key_id, aws_secret_access_key - - # Fetch bucket object - def _fetch_bucket(self, bucket_name): - ''' - Method to return a bucket object which can be used to interact - with an AWS S3 bucket using credentials found in a local file. - - Parameters - ---------- - bucket_name : string - string corresponding to the name of the bucket on S3 - - Returns - ------- - bucket : boto3.resources.factory.s3.Bucket - boto3 s3 Bucket object which is used to interact with files - in an S3 bucket on AWS - ''' - - # Import packages - import logging - - try: - import boto3 - import botocore - except ImportError as exc: - err_msg = 'Boto3 package is not installed - install boto3 and '\ - 'try again.' - raise Exception(err_msg) - - # Init variables - creds_path = self.inputs.creds_path - iflogger = logging.getLogger('interface') - - # Try and get AWS credentials if a creds_path is specified - if creds_path: - try: - aws_access_key_id, aws_secret_access_key = \ - self._return_aws_keys(creds_path) - except Exception as exc: - err_msg = 'There was a problem extracting the AWS credentials '\ - 'from the credentials file provided: %s. Error:\n%s'\ - % (creds_path, exc) - raise Exception(err_msg) - # Init connection - iflogger.info('Connecting to S3 bucket: %s with credentials from '\ - '%s ...' % (bucket_name, creds_path)) - # Use individual session for each instance of DataSink - # Better when datasinks are being used in multi-threading, see: - # http://boto3.readthedocs.org/en/latest/guide/resources.html#multithreading - session = boto3.session.Session(aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key) - s3_resource = session.resource('s3', use_ssl=True) - - # Otherwise, connect anonymously - else: - iflogger.info('Connecting to AWS: %s anonymously...'\ - % bucket_name) - session = boto3.session.Session() - s3_resource = session.resource('s3', use_ssl=True) - s3_resource.meta.client.meta.events.register('choose-signer.s3.*', - botocore.handlers.disable_signing) - - # Explicitly declare a secure SSL connection for bucket object - bucket = s3_resource.Bucket(bucket_name) - - # And try fetch the bucket with the name argument - try: - s3_resource.meta.client.head_bucket(Bucket=bucket_name) - except botocore.exceptions.ClientError as exc: - error_code = int(exc.response['Error']['Code']) - if error_code == 403: - err_msg = 'Access to bucket: %s is denied; check credentials'\ - % bucket_name - raise Exception(err_msg) - elif error_code == 404: - err_msg = 'Bucket: %s does not exist; check spelling and try '\ - 'again' % bucket_name - raise Exception(err_msg) - else: - err_msg = 'Unable to connect to bucket: %s. Error message:\n%s'\ - % (bucket_name, exc) - except Exception as exc: - err_msg = 'Unable to connect to bucket: %s. Error message:\n%s'\ - % (bucket_name, exc) - raise Exception(err_msg) - - # Return the bucket - return bucket - - # Send up to S3 method - def _upload_to_s3(self, src, dst): - ''' - Method to upload outputs to S3 bucket instead of on local disk - ''' - - # Import packages - import hashlib - import logging - import os - - from botocore.exceptions import ClientError - - # Init variables - bucket = self.bucket - iflogger = logging.getLogger('interface') - s3_str = 's3://' - s3_prefix = s3_str + bucket.name - - # Explicitly lower-case the "s3" - if dst.lower().startswith(s3_str): - dst_sp = dst.split('/') - dst_sp[0] = dst_sp[0].lower() - dst = '/'.join(dst_sp) - - # If src is a directory, collect files (this assumes dst is a dir too) - if os.path.isdir(src): - src_files = [] - for root, dirs, files in os.walk(src): - src_files.extend([os.path.join(root, fil) for fil in files]) - # Make the dst files have the dst folder as base dir - dst_files = [os.path.join(dst, src_f.split(src)[1]) \ - for src_f in src_files] - else: - src_files = [src] - dst_files = [dst] - - # Iterate over src and copy to dst - for src_idx, src_f in enumerate(src_files): - # Get destination filename/keyname - dst_f = dst_files[src_idx] - dst_k = dst_f.replace(s3_prefix, '').lstrip('/') - - # See if same file is already up there - try: - dst_obj = bucket.Object(key=dst_k) - dst_md5 = dst_obj.e_tag.strip('"') - - # See if same file is already there - src_read = open(src_f, 'rb').read() - src_md5 = hashlib.md5(src_read).hexdigest() - # Move to next loop iteration - if dst_md5 == src_md5: - iflogger.info('File %s already exists on S3, skipping...' % dst_f) - continue - else: - iflogger.info('Overwriting previous S3 file...') - - except ClientError: - iflogger.info('New file to S3') - - # Copy file up to S3 (either encrypted or not) - iflogger.info('Uploading %s to S3 bucket, %s, as %s...'\ - % (src_f, bucket.name, dst_f)) - if self.inputs.encrypt_bucket_keys: - extra_args = {'ServerSideEncryption' : 'AES256'} - else: - extra_args = {} - bucket.upload_file(src_f, dst_k, ExtraArgs=extra_args, - Callback=ProgressPercentage(src_f)) - - # List outputs, main run routine def _list_outputs(self): """Execute this module. """ - - # Init variables - iflogger = logging.getLogger('interface') outputs = self.output_spec().get() out_files = [] - # Use hardlink - use_hardlink = str2bool(config.get('execution', 'try_hard_link_datasink')) - - # Set local output directory if specified - if isdefined(self.inputs.local_copy): - outdir = self.inputs.local_copy - else: - outdir = self.inputs.base_directory - # If base directory isn't given, assume current directory - if not isdefined(outdir): - outdir = '.' - - # Check if base directory reflects S3 bucket upload - try: - s3_flag = self._check_s3_base_dir() - if s3_flag: - s3dir = self.inputs.base_directory - if isdefined(self.inputs.container): - s3dir = os.path.join(s3dir, self.inputs.container) - else: - s3dir = '' - # If encountering an exception during bucket access, set output - # base directory to a local folder - except Exception as exc: - s3dir = '' - s3_flag = False - if not isdefined(self.inputs.local_copy): - local_out_exception = os.path.join(os.path.expanduser('~'), - 's3_datasink_' + self.bucket.name) - outdir = local_out_exception - # Log local copying directory - iflogger.info('Access to S3 failed! Storing outputs locally at: '\ - '%s\nError: %s' %(outdir, exc)) - - # If container input is given, append that to outdir + outdir = self.inputs.base_directory + if not isdefined(outdir): + outdir = '.' + outdir = os.path.abspath(outdir) if isdefined(self.inputs.container): outdir = os.path.join(outdir, self.inputs.container) - - # If sinking to local folder - if outdir != s3dir: - outdir = os.path.abspath(outdir) - # Create the directory if it doesn't exist - if not os.path.exists(outdir): - try: - os.makedirs(outdir) - except OSError, inst: - if 'File exists' in inst: - pass - else: - raise(inst) - - # Iterate through outputs attributes {key : path(s)} - for key, files in self.inputs._outputs.items(): + if not os.path.exists(outdir): + try: + os.makedirs(outdir) + except OSError as inst: + if 'File exists' in inst: + pass + else: + raise(inst) + use_hardlink = str2bool(config.get('execution', + 'try_hard_link_datasink')) + for key, files in list(self.inputs._outputs.items()): if not isdefined(files): continue iflogger.debug("key: %s files: %s" % (key, str(files))) files = filename_to_list(files) tempoutdir = outdir - if s3_flag: - s3tempoutdir = s3dir for d in key.split('.'): if d[0] == '@': continue tempoutdir = os.path.join(tempoutdir, d) - if s3_flag: - s3tempoutdir = os.path.join(s3tempoutdir, d) # flattening list if isinstance(files, list): if isinstance(files[0], list): files = [item for sublist in files for item in sublist] - # Iterate through passed-in source files for src in filename_to_list(files): - # Format src and dst files src = os.path.abspath(src) - if not os.path.isfile(src): - src = os.path.join(src, '') - dst = self._get_dst(src) - if s3_flag: - s3dst = os.path.join(s3tempoutdir, dst) - s3dst = self._substitute(s3dst) - dst = os.path.join(tempoutdir, dst) - dst = self._substitute(dst) - path, _ = os.path.split(dst) - - # If we're uploading to S3 - if s3_flag: - self._upload_to_s3(src, s3dst) - out_files.append(s3dst) - # Otherwise, copy locally src -> dst - if not s3_flag or isdefined(self.inputs.local_copy): - # Create output directory if it doesnt exist + if os.path.isfile(src): + dst = self._get_dst(src) + dst = os.path.join(tempoutdir, dst) + dst = self._substitute(dst) + path, _ = os.path.split(dst) if not os.path.exists(path): try: os.makedirs(path) - except OSError, inst: + except OSError as inst: if 'File exists' in inst: pass else: raise(inst) - # If src is a file, copy it to dst - if os.path.isfile(src): - iflogger.debug('copyfile: %s %s' % (src, dst)) - copyfile(src, dst, copy=True, hashmethod='content', - use_hardlink=use_hardlink) - out_files.append(dst) - # If src is a directory, copy entire contents to dst dir - elif os.path.isdir(src): - if os.path.exists(dst) and self.inputs.remove_dest_dir: - iflogger.debug('removing: %s' % dst) - shutil.rmtree(dst) - iflogger.debug('copydir: %s %s' % (src, dst)) - copytree(src, dst) - out_files.append(dst) - - # Return outputs dictionary + iflogger.debug("copyfile: %s %s" % (src, dst)) + copyfile(src, dst, copy=True, hashmethod='content', + use_hardlink=use_hardlink) + out_files.append(dst) + elif os.path.isdir(src): + dst = self._get_dst(os.path.join(src, '')) + dst = os.path.join(tempoutdir, dst) + dst = self._substitute(dst) + path, _ = os.path.split(dst) + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as inst: + if 'File exists' in inst: + pass + else: + raise(inst) + if os.path.exists(dst) and self.inputs.remove_dest_dir: + iflogger.debug("removing: %s" % dst) + shutil.rmtree(dst) + iflogger.debug("copydir: %s %s" % (src, dst)) + copytree(src, dst) + out_files.append(dst) outputs['out_file'] = out_files return outputs @@ -766,15 +382,15 @@ def _list_outputs(self): class S3DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): testing = traits.Bool(False, usedefault=True, - desc='Flag for using local fakes3 server.' - ' (for testing purposes only)') + desc='Flag for using local fakes3 server.' + ' (for testing purposes only)') anon = traits.Bool(False, usedefault=True, - desc='Use anonymous connection to s3') + desc='Use anonymous connection to s3') bucket = traits.Str(mandatory=True, desc='Amazon S3 bucket where your data is stored') bucket_path = traits.Str('', usedefault=True, desc='Location within your bucket to store ' - 'data.') + 'data.') base_directory = Directory( desc='Path to the base directory for storing data.') container = traits.Str( @@ -795,8 +411,6 @@ class S3DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): _outputs = traits.Dict(traits.Str, value={}, usedefault=True) remove_dest_dir = traits.Bool(False, usedefault=True, desc='remove dest directory when copying dirs') - # Set this if user wishes to have local copy of files as well - local_copy = traits.Str(desc='Copy files locally as well as to S3 bucket') def __setattr__(self, key, value): if key not in self.copyable_trait_names(): @@ -812,7 +426,7 @@ def __setattr__(self, key, value): class S3DataSink(DataSink): """ Works exactly like DataSink, except the specified files will also be uploaded to Amazon S3 storage in the specified bucket - and location. 'bucket_path' is the s3 analog for + and location. 'bucket_path' is the s3 analog for 'base_directory'. """ @@ -842,7 +456,7 @@ def localtos3(self, paths): # convert local path to s3 path bd_index = path.find(self.inputs.base_directory) if bd_index != -1: # base_directory is in path, maintain directory structure - s3path = path[bd_index+len(self.inputs.base_directory):] # cut out base directory + s3path = path[bd_index + len(self.inputs.base_directory):] # cut out base directory if s3path[0] == os.path.sep: s3path = s3path[1:] else: # base_directory isn't in path, simply place all files in bucket_path folder @@ -861,16 +475,17 @@ def localtos3(self, paths): class S3DataGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): anon = traits.Bool(False, usedefault=True, - desc='Use anonymous connection to s3') + desc='Use anonymous connection to s3. If this is set to True, boto may print' + + ' a urlopen error, but this does not prevent data from being downloaded.') region = traits.Str('us-east-1', usedefault=True, - desc='Region of s3 bucket') + desc='Region of s3 bucket') bucket = traits.Str(mandatory=True, desc='Amazon S3 bucket where your data is stored') bucket_path = traits.Str('', usedefault=True, desc='Location within your bucket for subject data.') local_directory = Directory(exists=True, desc='Path to the local directory for subject data to be downloaded ' - 'and accessed. Should be on HDFS for Spark jobs.') + 'and accessed. Should be on HDFS for Spark jobs.') raise_on_empty = traits.Bool(True, usedefault=True, desc='Generate exception if list is empty for a given field') sort_filelist = traits.Bool(mandatory=True, @@ -932,7 +547,7 @@ def __init__(self, infields=None, outfields=None, **kwargs): if not isdefined(self.inputs.template_args): self.inputs.template_args = {} for key in outfields: - if not key in self.inputs.template_args: + if key not in self.inputs.template_args: if infields: self.inputs.template_args[key] = [infields] else: @@ -973,8 +588,8 @@ def _list_outputs(self): if hasattr(self.inputs, 'field_template') and \ isdefined(self.inputs.field_template) and \ key in self.inputs.field_template: - template = self.inputs.field_template[key] # template override for multiple outfields - if isdefined(self.inputs.bucket_path): + template = self.inputs.field_template[key] # template override for multiple outfields + if isdefined(self.inputs.bucket_path): template = os.path.join(self.inputs.bucket_path, template) if not args: filelist = [] @@ -995,7 +610,7 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): @@ -1006,7 +621,7 @@ def _list_outputs(self): for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -1042,14 +657,17 @@ def _list_outputs(self): # Outputs are currently stored as locations on S3. # We must convert to the local location specified # and download the files. - for key in outputs: - if type(outputs[key]) == list: - paths = outputs[key] - for i in range(len(paths)): - path = paths[i] + for key,val in outputs.iteritems(): + #This will basically be either list-like or string-like: + #if it has the __iter__ attribute, it's list-like (list, + #tuple, numpy array) and we iterate through each of its + #values. If it doesn't, it's string-like (string, + #unicode), and we convert that value directly. + if hasattr(val,'__iter__'): + for i,path in enumerate(val): outputs[key][i] = self.s3tolocal(path, bkt) - elif type(outputs[key]) == str: - outputs[key] = self.s3tolocal(outputs[key], bkt) + else: + outputs[key] = self.s3tolocal(val, bkt) return outputs @@ -1175,7 +793,7 @@ def __init__(self, infields=None, outfields=None, **kwargs): if not isdefined(self.inputs.template_args): self.inputs.template_args = {} for key in outfields: - if not key in self.inputs.template_args: + if key not in self.inputs.template_args: if infields: self.inputs.template_args[key] = [infields] else: @@ -1189,7 +807,7 @@ def _add_output_traits(self, base): Using traits.Any instead out OutputMultiPath till add_trait bug is fixed. """ - return add_traits(base, self.inputs.template_args.keys()) + return add_traits(base, list(self.inputs.template_args.keys())) def _list_outputs(self): # infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically @@ -1203,7 +821,7 @@ def _list_outputs(self): raise ValueError(msg) outputs = {} - for key, args in self.inputs.template_args.items(): + for key, args in list(self.inputs.template_args.items()): outputs[key] = [] template = self.inputs.template if hasattr(self.inputs, 'field_template') and \ @@ -1231,7 +849,7 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): @@ -1242,7 +860,7 @@ def _list_outputs(self): for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -1278,17 +896,17 @@ def _list_outputs(self): class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): base_directory = Directory(exists=True, - desc="Root path common to templates.") + desc="Root path common to templates.") sort_filelist = traits.Bool(True, usedefault=True, - desc="When matching mutliple files, return them in sorted order.") + desc="When matching mutliple files, return them in sorted order.") raise_on_empty = traits.Bool(True, usedefault=True, - desc="Raise an exception if a template pattern matches no files.") + desc="Raise an exception if a template pattern matches no files.") force_lists = traits.Either(traits.Bool(), traits.List(traits.Str()), - default=False, usedefault=True, - desc=("Whether to return outputs as a list even when only one file " - "matches the template. Either a boolean that applies to all " - "output fields or a list of output field names to coerce to " - " a list")) + default=False, usedefault=True, + desc=("Whether to return outputs as a list even when only one file " + "matches the template. Either a boolean that applies to all " + "output fields or a list of output field names to coerce to " + " a list")) class SelectFiles(IOBase): @@ -1305,12 +923,13 @@ class SelectFiles(IOBase): Examples -------- + >>> import pprint >>> from nipype import SelectFiles, Node >>> templates={"T1": "{subject_id}/struct/T1.nii", ... "epi": "{subject_id}/func/f[0, 1].nii"} >>> dg = Node(SelectFiles(templates), "selectfiles") >>> dg.inputs.subject_id = "subj1" - >>> dg.outputs.get() + >>> pprint.pprint(dg.outputs.get()) # doctest: +NORMALIZE_WHITESPACE {'T1': , 'epi': } The same thing with dynamic grabbing of specific files: @@ -1345,7 +964,7 @@ def __init__(self, templates, **kwargs): # Infer the infields and outfields from the template infields = [] - for name, template in templates.iteritems(): + for name, template in templates.items(): for _, field_name, _, _ in string.Formatter().parse(template): if field_name is not None and field_name not in infields: infields.append(field_name) @@ -1363,12 +982,12 @@ def __init__(self, templates, **kwargs): def _add_output_traits(self, base): """Add the dynamic output fields""" - return add_traits(base, self._templates.keys()) + return add_traits(base, list(self._templates.keys())) def _list_outputs(self): """Find the files and expose them as interface outputs.""" outputs = {} - info = dict([(k, v) for k, v in self.inputs.__dict__.items() + info = dict([(k, v) for k, v in list(self.inputs.__dict__.items()) if k in self._infields]) force_lists = self.inputs.force_lists @@ -1383,7 +1002,7 @@ def _list_outputs(self): "'templates'.") % (plural, bad_fields, verb) raise ValueError(msg) - for field, template in self._templates.iteritems(): + for field, template in self._templates.items(): # Build the full template path if isdefined(self.inputs.base_directory): @@ -1425,10 +1044,10 @@ class DataFinderInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): match_regex = traits.Str('(.+)', usedefault=True, desc=("Regular expression for matching " - "paths.")) + "paths.")) ignore_regexes = traits.List(desc=("List of regular expressions, " - "if any match the path it will be " - "ignored.") + "if any match the path it will be " + "ignored.") ) max_depth = traits.Int(desc="The maximum depth to search beneath " "the root_paths") @@ -1456,17 +1075,17 @@ class DataFinder(IOBase): >>> df.inputs.root_paths = '.' >>> df.inputs.match_regex = '.+/(?P.+(qT1|ep2d_fid_T1).+)/(?P.+)\.nii.gz' >>> result = df.run() # doctest: +SKIP - >>> print result.outputs.out_paths # doctest: +SKIP + >>> result.outputs.out_paths # doctest: +SKIP ['./027-ep2d_fid_T1_Gd4/acquisition.nii.gz', './018-ep2d_fid_T1_Gd2/acquisition.nii.gz', './016-ep2d_fid_T1_Gd1/acquisition.nii.gz', './013-ep2d_fid_T1_pre/acquisition.nii.gz'] - >>> print result.outputs.series_dir # doctest: +SKIP + >>> result.outputs.series_dir # doctest: +SKIP ['027-ep2d_fid_T1_Gd4', '018-ep2d_fid_T1_Gd2', '016-ep2d_fid_T1_Gd1', '013-ep2d_fid_T1_pre'] - >>> print result.outputs.basename # doctest: +SKIP + >>> result.outputs.basename # doctest: +SKIP ['acquisition', 'acquisition' 'acquisition', @@ -1479,25 +1098,25 @@ class DataFinder(IOBase): _always_run = True def _match_path(self, target_path): - #Check if we should ignore the path + # Check if we should ignore the path for ignore_re in self.ignore_regexes: if ignore_re.search(target_path): return - #Check if we can match the path + # Check if we can match the path match = self.match_regex.search(target_path) - if not match is None: + if match is not None: match_dict = match.groupdict() if self.result is None: self.result = {'out_paths': []} - for key in match_dict.keys(): + for key in list(match_dict.keys()): self.result[key] = [] self.result['out_paths'].append(target_path) - for key, val in match_dict.iteritems(): + for key, val in match_dict.items(): self.result[key].append(val) def _run_interface(self, runtime): - #Prepare some of the inputs - if isinstance(self.inputs.root_paths, six.string_types): + # Prepare some of the inputs + if isinstance(self.inputs.root_paths, string_types): self.inputs.root_paths = [self.inputs.root_paths] self.match_regex = re.compile(self.inputs.match_regex) if self.inputs.max_depth is Undefined: @@ -1516,24 +1135,24 @@ def _run_interface(self, runtime): for regex in self.inputs.ignore_regexes] self.result = None for root_path in self.inputs.root_paths: - #Handle tilda/env variables and remove extra seperators + # Handle tilda/env variables and remove extra seperators root_path = os.path.normpath(os.path.expandvars(os.path.expanduser(root_path))) - #Check if the root_path is a file + # Check if the root_path is a file if os.path.isfile(root_path): if min_depth == 0: self._match_path(root_path) continue - #Walk through directory structure checking paths + # Walk through directory structure checking paths for curr_dir, sub_dirs, files in os.walk(root_path): - #Determine the current depth from the root_path + # Determine the current depth from the root_path curr_depth = (curr_dir.count(os.sep) - root_path.count(os.sep)) - #If the max path depth has been reached, clear sub_dirs - #and files + # If the max path depth has been reached, clear sub_dirs + # and files if max_depth is not None and curr_depth >= max_depth: sub_dirs[:] = [] files = [] - #Test the path for the curr_dir and all files + # Test the path for the curr_dir and all files if curr_depth >= min_depth: self._match_path(curr_dir) if curr_depth >= (min_depth - 1): @@ -1541,17 +1160,16 @@ def _run_interface(self, runtime): full_path = os.path.join(curr_dir, infile) self._match_path(full_path) if (self.inputs.unpack_single and - len(self.result['out_paths']) == 1 - ): - for key, vals in self.result.iteritems(): + len(self.result['out_paths']) == 1): + for key, vals in self.result.items(): self.result[key] = vals[0] else: - #sort all keys acording to out_paths - for key in self.result.keys(): + # sort all keys acording to out_paths + for key in list(self.result.keys()): if key == "out_paths": continue - sort_tuples = human_order_sorted(zip(self.result["out_paths"], - self.result[key])) + sort_tuples = human_order_sorted(list(zip(self.result["out_paths"], + self.result[key]))) self.result[key] = [x for (_, x) in sort_tuples] self.result["out_paths"] = human_order_sorted(self.result["out_paths"]) @@ -1701,7 +1319,7 @@ def _list_outputs(self): subject_path = os.path.join(subjects_dir, self.inputs.subject_id) output_traits = self._outputs() outputs = output_traits.get() - for k in outputs.keys(): + for k in list(outputs.keys()): val = self._get_files(subject_path, k, output_traits.traits()[k].loc, output_traits.traits()[k].altkey) @@ -1804,7 +1422,7 @@ def __init__(self, infields=None, outfields=None, **kwargs): desc="arguments that fit into query_template") ) undefined_traits['field_template'] = Undefined - #self.inputs.remove_trait('query_template_args') + # self.inputs.remove_trait('query_template_args') outdict = {} for key in outfields: outdict[key] = [] @@ -1817,7 +1435,7 @@ def _add_output_traits(self, base): Using traits.Any instead out OutputMultiPath till add_trait bug is fixed. """ - return add_traits(base, self.inputs.query_template_args.keys()) + return add_traits(base, list(self.inputs.query_template_args.keys())) def _list_outputs(self): # infields are mandatory, however I could not figure out @@ -1840,12 +1458,12 @@ def _list_outputs(self): if not isdefined(value): msg = ("%s requires a value for input '%s' " "because it was listed in 'infields'" % - (self.__class__.__name__, key) + (self.__class__.__name__, key) ) raise ValueError(msg) outputs = {} - for key, args in self.inputs.query_template_args.items(): + for key, args in list(self.inputs.query_template_args.items()): outputs[key] = [] template = self.inputs.query_template if hasattr(self.inputs, 'field_template') and \ @@ -1866,7 +1484,7 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): @@ -1879,7 +1497,7 @@ def _list_outputs(self): for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, six.string_types) and \ + if isinstance(arg, string_types) and \ hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): @@ -1960,11 +1578,11 @@ class XNATSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): ) share = traits.Bool(False, - desc=('Option to share the subjects from the original project' - 'instead of creating new ones when possible - the created ' - 'experiments are then shared back to the original project' - ), - usedefault=True) + desc=('Option to share the subjects from the original project' + 'instead of creating new ones when possible - the created ' + 'experiments are then shared back to the original project' + ), + usedefault=True) def __setattr__(self, key, value): if key not in self.copyable_trait_names(): @@ -2043,7 +1661,7 @@ def _list_outputs(self): uri_template_args['reconstruction_id'] = quote_id(self.inputs.reconstruction_id) # gather outputs and upload them - for key, files in self.inputs._outputs.items(): + for key, files in list(self.inputs._outputs.items()): for name in filename_to_list(files): @@ -2074,7 +1692,7 @@ def push_file(self, xnat, file_name, out_key, uri_template_args): if part.startswith('_') and len(part.split('_')) % 2 ] - keymap = dict(zip(val_list[1::2], val_list[2::2])) + keymap = dict(list(zip(val_list[1::2], val_list[2::2]))) _label = [] for key, val in sorted(keymap.items()): @@ -2121,7 +1739,7 @@ def push_file(self, xnat, file_name, out_key, uri_template_args): ) # unquote values before uploading - for key in uri_template_args.keys(): + for key in list(uri_template_args.keys()): uri_template_args[key] = unquote_id(uri_template_args[key]) # upload file @@ -2258,18 +1876,19 @@ def _list_outputs(self): c.close() return None + class SSHDataGrabberInputSpec(DataGrabberInputSpec): hostname = traits.Str(mandatory=True, desc='Server hostname.') username = traits.Str(desc='Server username.') password = traits.Password(desc='Server password.') download_files = traits.Bool(True, usedefault=True, - desc='If false it will return the file names without downloading them') + desc='If false it will return the file names without downloading them') base_directory = traits.Str(mandatory=True, - desc='Path to the base directory consisting of subject data.') + desc='Path to the base directory consisting of subject data.') template_expression = traits.Enum(['fnmatch', 'regexp'], usedefault=True, - desc='Use either fnmatch or regexp to express templates') + desc='Use either fnmatch or regexp to express templates') ssh_log_to_file = traits.Str('', usedefault=True, - desc='If set SSH commands will be logged to the given file') + desc='If set SSH commands will be logged to the given file') class SSHDataGrabber(DataGrabber): @@ -2354,7 +1973,7 @@ def __init__(self, infields=None, outfields=None, **kwargs): paramiko except NameError: warn( - "The library parmiko needs to be installed" + "The library paramiko needs to be installed" " for this module to run." ) if not outfields: @@ -2377,13 +1996,12 @@ def __init__(self, infields=None, outfields=None, **kwargs): ): self.inputs.template += '$' - def _list_outputs(self): try: paramiko except NameError: raise ImportError( - "The library parmiko needs to be installed" + "The library paramiko needs to be installed" " for this module to run." ) @@ -2400,7 +2018,7 @@ def _list_outputs(self): raise ValueError(msg) outputs = {} - for key, args in self.inputs.template_args.items(): + for key, args in list(self.inputs.template_args.items()): outputs[key] = [] template = self.inputs.template if hasattr(self.inputs, 'field_template') and \ @@ -2416,7 +2034,7 @@ def _list_outputs(self): filelist = fnmatch.filter(filelist, template) elif self.inputs.template_expression == 'regexp': regexp = re.compile(template) - filelist = filter(regexp.match, filelist) + filelist = list(filter(regexp.match, filelist)) else: raise ValueError('template_expression value invalid') if len(filelist) == 0: @@ -2436,7 +2054,7 @@ def _list_outputs(self): for argnum, arglist in enumerate(args): maxlen = 1 for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): if (maxlen > 1) and (len(arg) != maxlen): @@ -2447,7 +2065,7 @@ def _list_outputs(self): for i in range(maxlen): argtuple = [] for arg in arglist: - if isinstance(arg, six.string_types) and hasattr(self.inputs, arg): + if isinstance(arg, string_types) and hasattr(self.inputs, arg): arg = getattr(self.inputs, arg) if isinstance(arg, list): argtuple.append(arg[i]) @@ -2469,7 +2087,7 @@ def _list_outputs(self): outfiles = fnmatch.filter(filelist, filledtemplate_base) elif self.inputs.template_expression == 'regexp': regexp = re.compile(filledtemplate_base) - outfiles = filter(regexp.match, filelist) + outfiles = list(filter(regexp.match, filelist)) else: raise ValueError('template_expression value invalid') if len(outfiles) == 0: @@ -2496,7 +2114,7 @@ def _list_outputs(self): elif len(outputs[key]) == 1: outputs[key] = outputs[key][0] - for k, v in outputs.items(): + for k, v in list(outputs.items()): outputs[k] = os.path.join(os.getcwd(), v) return outputs @@ -2523,7 +2141,7 @@ def _get_ssh_client(self): class JSONFileGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): in_file = File(exists=True, desc='JSON source file') defaults = traits.Dict(desc=('JSON dictionary that sets default output' - 'values, overridden by values found in in_file')) + 'values, overridden by values found in in_file')) class JSONFileGrabber(IOBase): @@ -2535,16 +2153,17 @@ class JSONFileGrabber(IOBase): Example ------- + >>> import pprint >>> from nipype.interfaces.io import JSONFileGrabber >>> jsonSource = JSONFileGrabber() - >>> jsonSource.inputs.defaults = {'param1': u'overrideMe', 'param3': 1.0} + >>> jsonSource.inputs.defaults = {'param1': 'overrideMe', 'param3': 1.0} >>> res = jsonSource.run() - >>> res.outputs.get() - {'param3': 1.0, 'param1': u'overrideMe'} + >>> pprint.pprint(res.outputs.get()) + {'param1': 'overrideMe', 'param3': 1.0} >>> jsonSource.inputs.in_file = 'jsongrabber.txt' >>> res = jsonSource.run() - >>> res.outputs.get() - {'param3': 1.0, 'param2': 4, 'param1': u'exampleStr'} + >>> pprint.pprint(res.outputs.get()) # doctest: +NORMALIZE_WHITESPACE + {'param1': 'exampleStr', 'param2': 4, 'param3': 1.0} """ @@ -2553,23 +2172,23 @@ class JSONFileGrabber(IOBase): _always_run = True def _list_outputs(self): - import json + import simplejson outputs = {} if isdefined(self.inputs.in_file): with open(self.inputs.in_file, 'r') as f: - data = json.load(f) + data = simplejson.load(f) if not isinstance(data, dict): raise RuntimeError('JSON input has no dictionary structure') - for key, value in data.iteritems(): + for key, value in data.items(): outputs[key] = value if isdefined(self.inputs.defaults): defaults = self.inputs.defaults - for key, value in defaults.iteritems(): - if key not in outputs.keys(): + for key, value in defaults.items(): + if key not in list(outputs.keys()): outputs[key] = value return outputs @@ -2655,7 +2274,7 @@ def _process_name(self, name, val): return name, val def _list_outputs(self): - import json + import simplejson import os.path as op if not isdefined(self.inputs.out_file): @@ -2666,14 +2285,14 @@ def _list_outputs(self): out_dict = self.inputs.in_dict # Overwrite in_dict entries automatically - for key, val in self.inputs._outputs.items(): + for key, val in list(self.inputs._outputs.items()): if not isdefined(val) or key == 'trait_added': continue key, val = self._process_name(key, val) out_dict[key] = val with open(out_file, 'w') as f: - json.dump(out_dict, f) + simplejson.dump(out_dict, f) outputs = self.output_spec().get() outputs['out_file'] = out_file return outputs diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index d5abeab223..37ed6eae43 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -1,5 +1,10 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function +from builtins import zip +from builtins import range +from builtins import open + import os import glob import shutil @@ -13,7 +18,6 @@ import nipype.interfaces.io as nio from nipype.interfaces.base import Undefined -# Check for boto noboto = False try: import boto @@ -21,13 +25,6 @@ except: noboto = True -# Check for boto3 -noboto3 = False -try: - import boto3 - from botocore.utils import fix_s3_host -except: - noboto3 = True def test_datagrabber(): dg = nio.DataGrabber() @@ -35,6 +32,7 @@ def test_datagrabber(): yield assert_equal, dg.inputs.base_directory, Undefined yield assert_equal, dg.inputs.template_args, {'outfiles': []} + @skipif(noboto) def test_s3datagrabber(): dg = nio.S3DataGrabber() @@ -95,9 +93,11 @@ def test_selectfiles_valueerror(): force_lists=force_lists) yield assert_raises, ValueError, sf.run + @skipif(noboto) def test_s3datagrabber_communication(): - dg = nio.S3DataGrabber(infields=['subj_id', 'run_num'], outfields=['func', 'struct']) + dg = nio.S3DataGrabber( + infields=['subj_id', 'run_num'], outfields=['func', 'struct']) dg.inputs.anon = True dg.inputs.bucket = 'openfmri' dg.inputs.bucket_path = 'ds001/' @@ -109,24 +109,25 @@ def test_s3datagrabber_communication(): struct='%s/anatomy/highres001_brain.nii.gz') dg.inputs.subj_id = ['sub001', 'sub002'] dg.inputs.run_num = ['run001', 'run003'] - dg.inputs.template_args = dg.inputs.template_args = dict( + dg.inputs.template_args = dict( func=[['subj_id', 'run_num']], struct=[['subj_id']]) res = dg.run() func_outfiles = res.outputs.func struct_outfiles = res.outputs.struct # check for all files - yield assert_true, '/sub001/BOLD/task001_run001/bold.nii.gz' in func_outfiles[0] + yield assert_true, os.path.join(dg.inputs.local_directory, '/sub001/BOLD/task001_run001/bold.nii.gz') in func_outfiles[0] yield assert_true, os.path.exists(func_outfiles[0]) - yield assert_true, '/sub001/anatomy/highres001_brain.nii.gz' in struct_outfiles[0] + yield assert_true, os.path.join(dg.inputs.local_directory, '/sub001/anatomy/highres001_brain.nii.gz') in struct_outfiles[0] yield assert_true, os.path.exists(struct_outfiles[0]) - yield assert_true, '/sub002/BOLD/task001_run003/bold.nii.gz' in func_outfiles[1] + yield assert_true, os.path.join(dg.inputs.local_directory, '/sub002/BOLD/task001_run003/bold.nii.gz') in func_outfiles[1] yield assert_true, os.path.exists(func_outfiles[1]) - yield assert_true, '/sub002/anatomy/highres001_brain.nii.gz' in struct_outfiles[1] + yield assert_true, os.path.join(dg.inputs.local_directory, '/sub002/anatomy/highres001_brain.nii.gz') in struct_outfiles[1] yield assert_true, os.path.exists(struct_outfiles[1]) shutil.rmtree(tempdir) + def test_datagrabber_order(): tempdir = mkdtemp() file1 = mkstemp(prefix='sub002_L1_R1.q', dir=tempdir) @@ -152,6 +153,7 @@ def test_datagrabber_order(): yield assert_true, 'sub002_L3_R10' in outfiles[2][1] shutil.rmtree(tempdir) + def test_datasink(): ds = nio.DataSink() yield assert_true, ds.inputs.parameterization @@ -163,157 +165,6 @@ def test_datasink(): ds = nio.DataSink(infields=['test']) yield assert_true, 'test' in ds.inputs.copyable_trait_names() -# Function to check for fakes3 -def _check_for_fakes3(): - ''' - Function used internally to check for fakes3 installation - ''' - - # Import packages - import subprocess - - # Init variables - fakes3_found = False - - # Check for fakes3 - try: - ret_code = subprocess.check_call(['which', 'fakes3'], stdout=open(os.devnull, 'wb')) - if ret_code == 0: - fakes3_found = True - except subprocess.CalledProcessError as exc: - print 'fakes3 not found, install via \'gem install fakes3\', skipping test...' - except: - print 'Unable to check for fakes3 installation, skipping test...' - - # Return if found - return fakes3_found - -def _make_dummy_input(): - ''' - ''' - - # Import packages - import tempfile - - # Init variables - input_dir = tempfile.mkdtemp() - input_path = os.path.join(input_dir, 'datasink_test_s3.txt') - - # Create input file - with open(input_path, 'wb') as f: - f.write('ABCD1234') - - # Return path - return input_path - -# Check for fakes3 -fakes3 = _check_for_fakes3() - - -@skipif(noboto3 or not fakes3) -# Test datasink writes to s3 properly -def test_datasink_to_s3(): - ''' - This function tests to see if the S3 functionality of a DataSink - works properly - ''' - - # Import packages - import hashlib - import tempfile - - # Init variables - ds = nio.DataSink() - bucket_name = 'test' - container = 'outputs' - attr_folder = 'text_file' - output_dir = 's3://' + bucket_name - # Local temporary filepaths for testing - fakes3_dir = tempfile.mkdtemp() - input_path = _make_dummy_input() - - # Start up fake-S3 server - proc = Popen(['fakes3', '-r', fakes3_dir, '-p', '4567'], stdout=open(os.devnull, 'wb')) - - # Init boto3 s3 resource to talk with fakes3 - resource = boto3.resource(aws_access_key_id='mykey', - aws_secret_access_key='mysecret', - service_name='s3', - endpoint_url='http://localhost:4567', - use_ssl=False) - resource.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host) - - # Create bucket - bucket = resource.create_bucket(Bucket=bucket_name) - - # Prep datasink - ds.inputs.base_directory = output_dir - ds.inputs.container = container - ds.inputs.bucket = bucket - setattr(ds.inputs, attr_folder, input_path) - - # Run datasink - ds.run() - - # Get MD5sums and compare - key = '/'.join([container, attr_folder, os.path.basename(input_path)]) - obj = bucket.Object(key=key) - dst_md5 = obj.e_tag.replace('"', '') - src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest() - - # Kill fakes3 - proc.kill() - - # Delete fakes3 folder and input file - shutil.rmtree(fakes3_dir) - shutil.rmtree(os.path.dirname(input_path)) - - # Make sure md5sums match - yield assert_equal, src_md5, dst_md5 - -# Test the local copy attribute -def test_datasink_localcopy(): - ''' - Function to validate DataSink will make local copy via local_copy - attribute - ''' - - # Import packages - import hashlib - import tempfile - - # Init variables - local_dir = tempfile.mkdtemp() - container = 'outputs' - attr_folder = 'text_file' - - # Make dummy input file and datasink - input_path = _make_dummy_input() - ds = nio.DataSink() - - # Set up datasink - ds.inputs.container = container - ds.inputs.local_copy = local_dir - setattr(ds.inputs, attr_folder, input_path) - - # Expected local copy path - local_copy = os.path.join(local_dir, container, attr_folder, - os.path.basename(input_path)) - - # Run the datasink - ds.run() - - # Check md5sums of both - src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest() - dst_md5 = hashlib.md5(open(local_copy, 'rb').read()).hexdigest() - - # Delete temp diretories - shutil.rmtree(os.path.dirname(input_path)) - shutil.rmtree(local_dir) - - # Perform test - yield assert_equal, src_md5, dst_md5 - @skipif(noboto) def test_s3datasink(): @@ -350,13 +201,14 @@ def test_datasink_substitutions(): setattr(ds.inputs, '@outdir', files) ds.run() yield assert_equal, \ - sorted([os.path.basename(x) for - x in glob.glob(os.path.join(outdir, '*'))]), \ - ['!-yz-b.n', 'ABABAB.n'] # so we got re used 2nd and both patterns + sorted([os.path.basename(x) for + x in glob.glob(os.path.join(outdir, '*'))]), \ + ['!-yz-b.n', 'ABABAB.n'] # so we got re used 2nd and both patterns shutil.rmtree(indir) shutil.rmtree(outdir) -@skipif(noboto or not fakes3) + +@skipif(noboto) def test_s3datasink_substitutions(): indir = mkdtemp(prefix='-Tmp-nipype_ds_subs_in') outdir = mkdtemp(prefix='-Tmp-nipype_ds_subs_out') @@ -368,10 +220,17 @@ def test_s3datasink_substitutions(): # run fakes3 server and set up bucket fakes3dir = op.expanduser('~/fakes3') - proc = Popen(['fakes3', '-r', fakes3dir, '-p', '4567'], stdout=open(os.devnull, 'wb')) + try: + proc = Popen( + ['fakes3', '-r', fakes3dir, '-p', '4567'], stdout=open(os.devnull, 'wb')) + except OSError as ose: + if 'No such file or directory' in str(ose): + return # fakes3 not installed. OK! + raise ose + conn = S3Connection(anon=True, is_secure=False, port=4567, - host='localhost', - calling_format=OrdinaryCallingFormat()) + host='localhost', + calling_format=OrdinaryCallingFormat()) conn.create_bucket('test') ds = nio.S3DataSink( @@ -392,9 +251,9 @@ def test_s3datasink_substitutions(): setattr(ds.inputs, '@outdir', files) ds.run() yield assert_equal, \ - sorted([os.path.basename(x) for - x in glob.glob(os.path.join(outdir, '*'))]), \ - ['!-yz-b.n', 'ABABAB.n'] # so we got re used 2nd and both patterns + sorted([os.path.basename(x) for + x in glob.glob(os.path.join(outdir, '*'))]), \ + ['!-yz-b.n', 'ABABAB.n'] # so we got re used 2nd and both patterns bkt = conn.get_bucket(ds.inputs.bucket) bkt_files = list(k for k in bkt.list()) @@ -426,11 +285,12 @@ def test_s3datasink_substitutions(): shutil.rmtree(indir) shutil.rmtree(outdir) + def _temp_analyze_files(): """Generate temporary analyze file pair.""" fd, orig_img = mkstemp(suffix='.img', dir=mkdtemp()) orig_hdr = orig_img[:-4] + '.hdr' - fp = file(orig_hdr, 'w+') + fp = open(orig_hdr, 'w+') fp.close() return orig_img, orig_hdr @@ -516,7 +376,7 @@ def test_datafinder_unpack(): df.inputs.match_regex = '.+/(?P.+)\.txt' df.inputs.unpack_single = True result = df.run() - print result.outputs.out_paths + print(result.outputs.out_paths) yield assert_equal, result.outputs.out_paths, single_res @@ -528,7 +388,7 @@ def test_freesurfersource(): def test_jsonsink(): - import json + import simplejson import os ds = nio.JSONFileSink() @@ -547,7 +407,7 @@ def test_jsonsink(): res = js.run() with open(res.outputs.out_file, 'r') as f: - data = json.load(f) + data = simplejson.load(f) yield assert_true, data == {"contrasts": {"alt": "someNestedValue"}, "foo": "var", "new_entry": "someValue"} js = nio.JSONFileSink(infields=['test'], in_dict={'foo': 'var'}) @@ -557,9 +417,8 @@ def test_jsonsink(): res = js.run() with open(res.outputs.out_file, 'r') as f: - data = json.load(f) + data = simplejson.load(f) yield assert_true, data == {"test": "testInfields", "contrasts": {"alt": "someNestedValue"}, "foo": "var", "new_entry": "someValue"} os.chdir(curdir) shutil.rmtree(outdir) - From 70ca4576f1a519506ede675318a2d4507c09882d Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 13 Jan 2016 17:00:32 -0500 Subject: [PATCH 02/78] Started adding in logic for num_threads and changed names of real memory stats keys --- nipype/interfaces/base.py | 17 +++++++++++------ nipype/interfaces/utility.py | 15 +++++++++------ nipype/pipeline/plugins/multiproc.py | 10 ++++++---- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 2112cdc739..7577e23c5f 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1248,7 +1248,8 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): outfile = os.path.join(runtime.cwd, 'stdout.nipype') # Init variables for memory profiling - ret = -1 + mem_mb = -1 + num_threads = -1 interval = 0.1 if output == 'stream': @@ -1268,7 +1269,7 @@ def _process(drain=0): stream.read(drain) while proc.returncode is None: if mem_prof: - ret = max([ret, _get_memory(proc.pid, include_children=True)]) + mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) time.sleep(interval) proc.poll() _process() @@ -1287,7 +1288,8 @@ def _process(drain=0): if output == 'allatonce': if mem_prof: while proc.returncode is None: - ret = max([ret, _get_memory(proc.pid, include_children=True)]) + mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) + num_threads = max([num_threads, psutil.Proc(proc.pid).num_threads()]) time.sleep(interval) proc.poll() stdout, stderr = proc.communicate() @@ -1297,7 +1299,8 @@ def _process(drain=0): if output == 'file': if mem_prof: while proc.returncode is None: - ret = max([ret, _get_memory(proc.pid, include_children=True)]) + mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) + num_threads = max([num_threads, psutil.Proc(proc.pid).num_threads()]) time.sleep(interval) proc.poll() ret_code = proc.wait() @@ -1309,7 +1312,8 @@ def _process(drain=0): if output == 'none': if mem_prof: while proc.returncode is None: - ret = max([ret, _get_memory(proc.pid, include_children=True)]) + mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) + num_threads = max([num_threads, psutil.Proc(proc.pid).num_threads()]) time.sleep(interval) proc.poll() proc.communicate() @@ -1317,7 +1321,8 @@ def _process(drain=0): result['stderr'] = [] result['merged'] = '' - setattr(runtime, 'real_memory2', ret/1024.0) + setattr(runtime, 'cmd_memory', mem_mb/1024.0) + setattr(runtime, 'num_threads', num_threads) runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) runtime.merged = result['merged'] diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 10effaa548..f9d7aefe46 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -442,12 +442,15 @@ def _run_interface(self, runtime): if isdefined(value): args[name] = value - # mem stuff - import memory_profiler - proc = (function_handle, (), args) - mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) - setattr(runtime, 'real_memory2', mem_mb[0]/1024.0) - #out = function_handle(**args) + # Record memory of function_handle + try: + import memory_profiler + proc = (function_handle, (), args) + mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) + setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) + # If no memory_profiler package, run without recording memory + except ImportError: + out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 3a5c63df35..5234abfd22 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -17,18 +17,20 @@ def run_node(node, updatehash, plugin_args=None): result = dict(result=None, traceback=None) try: run_memory = plugin_args['memory_profile'] - except Exception: + import memory_profiler + except KeyError: + run_memory = False + except ImportError: run_memory = False if run_memory: - import memory_profiler import datetime proc = (node.run, (), {'updatehash' : updatehash}) start = datetime.datetime.now() mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) runtime = (datetime.datetime.now() - start).total_seconds() result['result'] = retval - result['real_memory'] = mem_mb[0]/1024.0 - result['real_memory2'] = retval.runtime.get('real_memory2') + result['node_memory'] = mem_mb[0]/1024.0 + result['cmd_memory'] = retval.runtime.get('cmd_memory') result['run_seconds'] = runtime else: try: From 36e1446c067a8ba0a1010411ec8a9d926489abea Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 14 Jan 2016 17:27:41 -0500 Subject: [PATCH 03/78] Added cmd-level threads and memory profiling --- nipype/interfaces/base.py | 26 +++++++------------ nipype/pipeline/plugins/base.py | 2 -- nipype/pipeline/plugins/callback_log.py | 19 +++++++++----- nipype/pipeline/plugins/multiproc.py | 34 +++++++++++++++++-------- 4 files changed, 44 insertions(+), 37 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 7577e23c5f..fb4a6abb71 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -751,17 +751,8 @@ def __init__(self, **inputs): self.__class__.__name__) self.inputs = self.input_spec(**inputs) self.estimated_memory = 1 - self._real_memory = 0 self.num_threads = 1 - @property - def real_memory(self): - return self._real_memory - - @real_memory.setter - def real_memory(self, value): - self._real_memory = value - @classmethod def help(cls, returnhelp=False): """ Prints class help @@ -1269,7 +1260,8 @@ def _process(drain=0): stream.read(drain) while proc.returncode is None: if mem_prof: - mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) time.sleep(interval) proc.poll() _process() @@ -1288,8 +1280,8 @@ def _process(drain=0): if output == 'allatonce': if mem_prof: while proc.returncode is None: - mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) - num_threads = max([num_threads, psutil.Proc(proc.pid).num_threads()]) + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) time.sleep(interval) proc.poll() stdout, stderr = proc.communicate() @@ -1299,8 +1291,8 @@ def _process(drain=0): if output == 'file': if mem_prof: while proc.returncode is None: - mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) - num_threads = max([num_threads, psutil.Proc(proc.pid).num_threads()]) + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) time.sleep(interval) proc.poll() ret_code = proc.wait() @@ -1312,8 +1304,8 @@ def _process(drain=0): if output == 'none': if mem_prof: while proc.returncode is None: - mem_mb = max([mem_mb, _get_memory(proc.pid, include_children=True)]) - num_threads = max([num_threads, psutil.Proc(proc.pid).num_threads()]) + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) time.sleep(interval) proc.poll() proc.communicate() @@ -1322,7 +1314,7 @@ def _process(drain=0): result['merged'] = '' setattr(runtime, 'cmd_memory', mem_mb/1024.0) - setattr(runtime, 'num_threads', num_threads) + setattr(runtime, 'cmd_threads', num_threads) runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) runtime.merged = result['merged'] diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index ab76520844..a7ed6e4de0 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -419,8 +419,6 @@ def _task_finished_cb(self, jobid, result=None): if result == None: if self._taskresult.has_key(jobid): result = self._taskresult[jobid].get() - else: - result = {'real_memory' : 'nokey'} self._status_callback(self.procs[jobid], 'end', result) # Update job and worker queues self.proc_pending[jobid] = False diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index a20242df95..6abfcd2e6a 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -4,11 +4,12 @@ def log_nodes_cb(node, status, result=None): logger = logging.getLogger('callback') try: - real_mem1 = result['real_memory'] - real_mem2 = result['real_memory2'] + node_mem = result['node_memory'] + cmd_mem = result['cmd_memory'] run_seconds = result['run_seconds'] + cmd_threads = result['cmd_threads'] except Exception as exc: - real_mem1 = real_mem2 = run_seconds = 'N/A' + node_mem = cmd_mem = run_seconds = cmd_threads = 'N/A' if status == 'start': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' +\ node._id + '"' + ',"start":' + '"' +str(datetime.datetime.now()) +\ @@ -19,16 +20,20 @@ def log_nodes_cb(node, status, result=None): elif status == 'end': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ - node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) +\ - '"' + ',"memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ - + str(node._interface.num_threads) + ',"real_memory1":' + str(real_mem1) + ',"real_memory2":' + str(real_mem2) + ',"run_seconds":' + str(run_seconds) + '}' + node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ + '"' + ',"estimate memory":' + str(node._interface.estimated_memory) + \ + ',"num_threads":' + str(node._interface.num_threads) + \ + ',"cmd-level threads":' + str(cmd_threads) + \ + ',"node-level memory":' + str(node_mem) + \ + ',"cmd-level memory":' + str(cmd_mem) + \ + ',"run_seconds":' + str(run_seconds) + '}' logger.debug(message) else: message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) +\ - '"' + ',"memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ + '"' + ',"estimate memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ + str(node._interface.num_threads) + ',"error":"True"}' logger.debug(message) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 5234abfd22..877f4e98e2 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -13,17 +13,27 @@ from .base import (DistributedPluginBase, report_crash) +# Run node def run_node(node, updatehash, plugin_args=None): - result = dict(result=None, traceback=None) + """docstring + """ + + # Import packages try: - run_memory = plugin_args['memory_profile'] + runtime_profile = plugin_args['runtime_profile'] import memory_profiler + import datetime except KeyError: - run_memory = False + runtime_profile = False except ImportError: - run_memory = False - if run_memory: - import datetime + runtime_profile = False + + # Init variables + result = dict(result=None, traceback=None) + + # If we're profiling the run + if runtime_profile: + # Init function tuple proc = (node.run, (), {'updatehash' : updatehash}) start = datetime.datetime.now() mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) @@ -31,7 +41,9 @@ def run_node(node, updatehash, plugin_args=None): result['result'] = retval result['node_memory'] = mem_mb[0]/1024.0 result['cmd_memory'] = retval.runtime.get('cmd_memory') + result['cmd_threads'] = retval.runtime.get('cmd_threads') result['run_seconds'] = runtime + # Otherwise, execute node.run as normal else: try: result['result'] = node.run(updatehash=updatehash) @@ -141,15 +153,15 @@ class ResourceMultiProcPlugin(MultiProcPlugin): the number of threads and memory of the system is used. System consuming nodes should be tagged: - memory_consuming_node.interface.memory = 8 #Gb + memory_consuming_node.interface.estimated_memory = 8 #Gb thread_consuming_node.interface.num_threads = 16 The default number of threads and memory for a node is 1. Currently supported options are: - - num_thread: maximum number of threads to be executed in parallel - - memory: maximum memory that can be used at once. + - num_threads: maximum number of threads to be executed in parallel + - estimated_memory: maximum memory that can be used at once. """ @@ -198,7 +210,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): for jobid in jobids: busy_memory+= self.procs[jobid]._interface.estimated_memory busy_processors+= self.procs[jobid]._interface.num_threads - + free_memory = self.memory - busy_memory free_processors = self.processors - busy_processors @@ -222,7 +234,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): if self.procs[jobid]._interface.estimated_memory <= free_memory and self.procs[jobid]._interface.num_threads <= free_processors: logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid)) executing_now.append(self.procs[jobid]) - + if isinstance(self.procs[jobid], MapNode): try: num_subnodes = self.procs[jobid].num_subnodes() From 43c0d567139c1018f5ab99d14ca6b374024d7aab Mon Sep 17 00:00:00 2001 From: carolFrohlich Date: Fri, 15 Jan 2016 13:12:30 -0500 Subject: [PATCH 04/78] remove MultiProc, MultiprocPlugin is default --- nipype/interfaces/ants/base.py | 2 +- nipype/pipeline/engine/tests/test_engine.py | 7 +- nipype/pipeline/engine/tests/test_utils.py | 2 +- nipype/pipeline/plugins/__init__.py | 1 - nipype/pipeline/plugins/multiproc.py | 98 +++++++------------ nipype/pipeline/plugins/tests/test_base.py | 2 +- .../pipeline/plugins/tests/test_callback.py | 6 +- .../pipeline/plugins/tests/test_multiproc.py | 3 +- .../plugins/tests/test_multiproc_nondaemon.py | 7 +- 9 files changed, 46 insertions(+), 82 deletions(-) diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index 20fab05881..c3ea4a674e 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -12,7 +12,7 @@ # -Using -1 gives primary responsibilty to ITKv4 to do the correct # thread limitings. # -Using 1 takes a very conservative approach to avoid overloading -# the computer (when running MultiProc) by forcing everything to +# the computer (when running ResourceMultiProc) by forcing everything to # single threaded. This can be a severe penalty for registration # performance. LOCAL_DEFAULT_NUMBER_OF_THREADS = 1 diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 5eaaa81fbf..2f829abcd4 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -714,8 +714,7 @@ def func1(in1): # set local check w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'true', - 'crashdump_dir': wd, - 'poll_sleep_duration': 2} + 'crashdump_dir': wd} # test output of num_subnodes method when serial is default (False) yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1) @@ -723,7 +722,7 @@ def func1(in1): # test running the workflow on default conditions error_raised = False try: - w1.run(plugin='MultiProc') + w1.run(plugin='ResourceMultiProc') except Exception as e: from nipype.pipeline.engine.base import logger logger.info('Exception: %s' % str(e)) @@ -737,7 +736,7 @@ def func1(in1): # test running the workflow on serial conditions error_raised = False try: - w1.run(plugin='MultiProc') + w1.run(plugin='ResourceMultiProc') except Exception as e: from nipype.pipeline.engine.base import logger logger.info('Exception: %s' % str(e)) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 8420f587c2..9688e02395 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -214,7 +214,7 @@ def test_function3(arg): out_dir = mkdtemp() - for plugin in ('Linear',): # , 'MultiProc'): + for plugin in ('Linear',): # , 'ResourceMultiProc'): n1 = pe.Node(niu.Function(input_names=['arg1'], output_names=['out_file1', 'out_file2', 'dir'], function=test_function), diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py index 68cf2832ff..643d5735f8 100644 --- a/nipype/pipeline/plugins/__init__.py +++ b/nipype/pipeline/plugins/__init__.py @@ -9,7 +9,6 @@ from .sge import SGEPlugin from .condor import CondorPlugin from .dagman import CondorDAGManPlugin -from .multiproc import MultiProcPlugin from .multiproc import ResourceMultiProcPlugin from .ipython import IPythonPlugin from .somaflow import SomaFlowPlugin diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index e7b8f183c0..af96a1e102 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -76,68 +76,6 @@ class NonDaemonPool(pool.Pool): """ Process = NonDaemonProcess - -class MultiProcPlugin(DistributedPluginBase): - """Execute workflow with multiprocessing - - The plugin_args input to run can be used to control the multiprocessing - execution. Currently supported options are: - - - n_procs : number of processes to use - - non_daemon : boolean flag to execute as non-daemon processes - - """ - - def __init__(self, plugin_args=None): - super(MultiProcPlugin, self).__init__(plugin_args=plugin_args) - self._taskresult = {} - self._taskid = 0 - non_daemon = True - n_procs = cpu_count() - if plugin_args: - if 'n_procs' in plugin_args: - n_procs = plugin_args['n_procs'] - if 'non_daemon' in plugin_args: - non_daemon = plugin_args['non_daemon'] - if non_daemon: - # run the execution using the non-daemon pool subclass - self.pool = NonDaemonPool(processes=n_procs) - else: - self.pool = Pool(processes=n_procs) - - - def _get_result(self, taskid): - if taskid not in self._taskresult: - raise RuntimeError('Multiproc task %d not found' % taskid) - if not self._taskresult[taskid].ready(): - return None - return self._taskresult[taskid].get() - - def _submit_job(self, node, updatehash=False): - self._taskid += 1 - try: - if node.inputs.terminal_output == 'stream': - node.inputs.terminal_output = 'allatonce' - except: - pass - self._taskresult[self._taskid] = self.pool.apply_async(run_node, (node, - updatehash,)) - return self._taskid - - def _report_crash(self, node, result=None): - if result and result['traceback']: - node._result = result['result'] - node._traceback = result['traceback'] - return report_crash(node, - traceback=result['traceback']) - else: - return report_crash(node) - - def _clear_task(self, taskid): - del self._taskresult[taskid] - - - import numpy as np from copy import deepcopy from ..engine import (MapNode, str2bool) @@ -150,8 +88,8 @@ def _clear_task(self, taskid): def release_lock(args): semaphore_singleton.semaphore.release() -class ResourceMultiProcPlugin(MultiProcPlugin): - """Execute workflow with multiprocessing not sending more jobs at once +class ResourceMultiProcPlugin(DistributedPluginBase): + """Execute workflow with multiprocessing, not sending more jobs at once than the system can support. The plugin_args input to run can be used to control the multiprocessing @@ -167,6 +105,7 @@ class ResourceMultiProcPlugin(MultiProcPlugin): Currently supported options are: + - non_daemon : boolean flag to execute as non-daemon processes - num_threads: maximum number of threads to be executed in parallel - estimated_memory: maximum memory that can be used at once. @@ -174,22 +113,53 @@ class ResourceMultiProcPlugin(MultiProcPlugin): def __init__(self, plugin_args=None): super(ResourceMultiProcPlugin, self).__init__(plugin_args=plugin_args) + self._taskresult = {} + self._taskid = 0 + non_daemon = True self.plugin_args = plugin_args self.processors = cpu_count() memory = psutil.virtual_memory() self.memory = memory.total / (1024*1024*1024) if self.plugin_args: + if 'non_daemon' in self.plugin_args: + non_daemon = plugin_args['non_daemon'] if 'n_procs' in self.plugin_args: self.processors = self.plugin_args['n_procs'] if 'memory' in self.plugin_args: self.memory = self.plugin_args['memory'] + if non_daemon: + # run the execution using the non-daemon pool subclass + self.pool = NonDaemonPool(processes=n_procs) + else: + self.pool = Pool(processes=n_procs) + def _wait(self): if len(self.pending_tasks) > 0: semaphore_singleton.semaphore.acquire() semaphore_singleton.semaphore.release() + def _get_result(self, taskid): + if taskid not in self._taskresult: + raise RuntimeError('Multiproc task %d not found' % taskid) + if not self._taskresult[taskid].ready(): + return None + return self._taskresult[taskid].get() + + + def _report_crash(self, node, result=None): + if result and result['traceback']: + node._result = result['result'] + node._traceback = result['traceback'] + return report_crash(node, + traceback=result['traceback']) + else: + return report_crash(node) + + def _clear_task(self, taskid): + del self._taskresult[taskid] + def _submit_job(self, node, updatehash=False): self._taskid += 1 try: diff --git a/nipype/pipeline/plugins/tests/test_base.py b/nipype/pipeline/plugins/tests/test_base.py index 243ae195c2..616cb634a0 100644 --- a/nipype/pipeline/plugins/tests/test_base.py +++ b/nipype/pipeline/plugins/tests/test_base.py @@ -38,5 +38,5 @@ def func(arg1): wf.add_nodes([funkynode]) wf.base_dir = '/tmp' -wf.run(plugin='MultiProc') +wf.run(plugin='ResourceMultiProc') ''' diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index db02bc889b..ce293f7d1b 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -75,8 +75,7 @@ def test_callback_multiproc_normal(): name='f_node') wf.add_nodes([f_node]) wf.config['execution']['crashdump_dir'] = wf.base_dir - wf.config['execution']['poll_sleep_duration'] = 2 - wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) + wf.run(plugin='ResourceMultiProc', plugin_args={'status_callback': so.callback}) assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' @@ -93,9 +92,8 @@ def test_callback_multiproc_exception(): name='f_node') wf.add_nodes([f_node]) wf.config['execution']['crashdump_dir'] = wf.base_dir - wf.config['execution']['poll_sleep_duration'] = 2 try: - wf.run(plugin='MultiProc', + wf.run(plugin='ResourceMultiProc', plugin_args={'status_callback': so.callback}) except: pass diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 672b988927..e7af00d343 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -43,8 +43,7 @@ def test_run_multiproc(): pipe.connect([(mod1, mod2, [('output1', 'input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 - pipe.config['execution']['poll_sleep_duration'] = 2 - execgraph = pipe.run(plugin="MultiProc") + execgraph = pipe.run(plugin="ResourceMultiProc") names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') diff --git a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py index 89336c2026..429eff0f26 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py @@ -84,7 +84,7 @@ def dummyFunction(filename): def run_multiproc_nondaemon_with_flag(nondaemon_flag): ''' - Start a pipe with two nodes using the multiproc plugin and passing the nondaemon_flag. + Start a pipe with two nodes using the resource multiproc plugin and passing the nondaemon_flag. ''' cur_dir = os.getcwd() @@ -107,11 +107,10 @@ def run_multiproc_nondaemon_with_flag(nondaemon_flag): f1.inputs.insum = 0 pipe.config['execution']['stop_on_first_crash'] = True - pipe.config['execution']['poll_sleep_duration'] = 2 - # execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag + # execute the pipe using the ResourceMultiProc plugin with 2 processes and the non_daemon flag # to enable child processes which start other multiprocessing jobs - execgraph = pipe.run(plugin="MultiProc", + execgraph = pipe.run(plugin="ResourceMultiProc", plugin_args={'n_procs': 2, 'non_daemon': nondaemon_flag}) From 0bb6d792081f88ce13fd3ab963168aab9f1645ef Mon Sep 17 00:00:00 2001 From: carolFrohlich Date: Tue, 19 Jan 2016 15:25:06 -0500 Subject: [PATCH 05/78] change old namespaces --- nipype/interfaces/base.py | 1 + nipype/interfaces/tests/test_io.py | 2 +- nipype/pipeline/plugins/callback_log.py | 18 +++---- nipype/pipeline/plugins/multiproc.py | 21 ++++---- .../pipeline/plugins/tests/test_multiproc.py | 6 ++- nipype/utils/draw_gantt_chart.py | 51 +++++++++++-------- 6 files changed, 55 insertions(+), 44 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 12832ead15..f63a8ae2e1 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1213,6 +1213,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Import packages try: from memory_profiler import _get_memory + import psutil mem_prof = True except: mem_prof = False diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 37ed6eae43..2b69448133 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -94,7 +94,7 @@ def test_selectfiles_valueerror(): yield assert_raises, ValueError, sf.run -@skipif(noboto) +@skip def test_s3datagrabber_communication(): dg = nio.S3DataGrabber( infields=['subj_id', 'run_num'], outfields=['func', 'struct']) diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index 6abfcd2e6a..854a217957 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -13,27 +13,27 @@ def log_nodes_cb(node, status, result=None): if status == 'start': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' +\ node._id + '"' + ',"start":' + '"' +str(datetime.datetime.now()) +\ - '"' + ',"estimate memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ + '"' + ',"estimated_memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ + str(node._interface.num_threads) + '}' logger.debug(message) elif status == 'end': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ - node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ - '"' + ',"estimate memory":' + str(node._interface.estimated_memory) + \ - ',"num_threads":' + str(node._interface.num_threads) + \ - ',"cmd-level threads":' + str(cmd_threads) + \ - ',"node-level memory":' + str(node_mem) + \ - ',"cmd-level memory":' + str(cmd_mem) + \ - ',"run_seconds":' + str(run_seconds) + '}' + node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ + '"' + ',"estimated_memory":' + '"'+ str(node._interface.estimated_memory) + '"'+ \ + ',"num_threads":' + '"'+ str(node._interface.num_threads) + '"'+ \ + ',"cmd-level_threads":' + '"'+ str(cmd_threads) + '"'+ \ + ',"node-level_memory":' + '"'+ str(node_mem) + '"'+ \ + ',"cmd-level_memory":' + '"'+ str(cmd_mem) + '"' + \ + ',"run_seconds":' + '"'+ str(run_seconds) + '"'+ '}' logger.debug(message) else: message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) +\ - '"' + ',"estimate memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ + '"' + ',"estimated_memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ + str(node._interface.num_threads) + ',"error":"True"}' logger.debug(message) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index af96a1e102..291ad15fea 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -9,7 +9,14 @@ from multiprocessing import Process, Pool, cpu_count, pool from traceback import format_exception import sys - +import numpy as np +from copy import deepcopy +from ..engine import MapNode +from ...utils.misc import str2bool +import datetime +import psutil +from ... import logging +import semaphore_singleton from .base import (DistributedPluginBase, report_crash) @@ -76,13 +83,7 @@ class NonDaemonPool(pool.Pool): """ Process = NonDaemonProcess -import numpy as np -from copy import deepcopy -from ..engine import (MapNode, str2bool) -import datetime -import psutil -from ... import logging -import semaphore_singleton + logger = logging.getLogger('workflow') def release_lock(args): @@ -130,9 +131,9 @@ def __init__(self, plugin_args=None): if non_daemon: # run the execution using the non-daemon pool subclass - self.pool = NonDaemonPool(processes=n_procs) + self.pool = NonDaemonPool(processes=self.processors) else: - self.pool = Pool(processes=n_procs) + self.pool = Pool(processes=self.processors) def _wait(self): if len(self.pending_tasks) > 0: diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index e7af00d343..60548d7217 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -6,7 +6,7 @@ from nipype.testing import assert_equal, assert_less_equal import nipype.pipeline.engine as pe - + class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') @@ -182,6 +182,8 @@ def test_do_not_use_more_memory_then_specified(): os.remove(LOG_FILENAME) + + def test_do_not_use_more_threads_then_specified(): LOG_FILENAME = 'callback.log' my_logger = logging.getLogger('callback') @@ -231,4 +233,4 @@ def test_do_not_use_more_threads_then_specified(): break yield assert_equal, result, True, "using more memory than system has (memory is not specified by user)" - os.remove(LOG_FILENAME) + os.remove(LOG_FILENAME) \ No newline at end of file diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 84bbc033a0..fcf8d95fe6 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -3,32 +3,39 @@ import datetime import random - + def log_to_json(logfile): result = [] with open(logfile, 'r') as content: - #read file separating each line - content = content.read() - lines = content.split('\n') - - lines = [ json.loads(x) for x in lines[:-1]] - - last_node = [ x for x in lines if x.has_key('finish')][-1] - - for i, line in enumerate(lines): - #get first start it finds - if not line.has_key('start'): - continue - - #fint the end node for that start - for j in range(i+1, len(lines)): - if lines[j].has_key('finish'): - if lines[j]['id'] == line['id'] and lines[j]['name'] == line['name']: - line['finish'] = lines[j]['finish'] - line['duration'] = (parser.parse(line['finish']) - parser.parse(line['start'])).total_seconds() - result.append(line) - break + #read file separating each line + content = content.read() + lines = content.split('\n') + l = [] + for i in lines: + try: + y = json.loads(i) + l.append(y) + except Exception, e: + pass + + lines = l + + last_node = [ x for x in lines if x.has_key('finish')][-1] + + for i, line in enumerate(lines): + #get first start it finds + if not line.has_key('start'): + continue + + #fint the end node for that start + for j in range(i+1, len(lines)): + if lines[j].has_key('finish'): + if lines[j]['id'] == line['id'] and lines[j]['name'] == line['name']: + line['finish'] = lines[j]['finish'] + line['duration'] = (parser.parse(line['finish']) - parser.parse(line['start'])).total_seconds() + result.append(line) + break return result, last_node From a68e0e68a6639b36f53964058ab4e5010c4d44dc Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 19 Jan 2016 15:27:59 -0500 Subject: [PATCH 06/78] Added initial num_threads monitoring code --- nipype/interfaces/base.py | 34 ++++++++++++++++++++-------- nipype/pipeline/plugins/multiproc.py | 7 +++--- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 12832ead15..f05e61bc9c 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1204,6 +1204,25 @@ def _read(self, drain): self._lastidx = len(self._rows) +def _get_num_threads(proc): + ''' + ''' + + # Import packages + import psutil + + # Init variables + num_threads = proc.num_threads() + try: + for child in proc.children(): + num_threads = max(num_threads, child.num_threads(), + len(child.children()), _get_num_threads(child)) + except psutil.NoSuchProcess: + dummy = 1 + + return num_threads + + def run_command(runtime, output=None, timeout=0.01, redirect_x=False): """Run a command, read stdout and stderr, prefix with timestamp. @@ -1213,6 +1232,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Import packages try: from memory_profiler import _get_memory + import psutil mem_prof = True except: mem_prof = False @@ -1253,7 +1273,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Init variables for memory profiling mem_mb = -1 num_threads = -1 - interval = 0.1 + interval = 1 if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] @@ -1273,8 +1293,7 @@ def _process(drain=0): while proc.returncode is None: if mem_prof: mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) - time.sleep(interval) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) proc.poll() _process() _process(drain=1) @@ -1293,8 +1312,7 @@ def _process(drain=0): if mem_prof: while proc.returncode is None: mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) - time.sleep(interval) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) proc.poll() stdout, stderr = proc.communicate() if stdout and isinstance(stdout, bytes): @@ -1315,8 +1333,7 @@ def _process(drain=0): if mem_prof: while proc.returncode is None: mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) - time.sleep(interval) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) proc.poll() ret_code = proc.wait() stderr.flush() @@ -1328,8 +1345,7 @@ def _process(drain=0): if mem_prof: while proc.returncode is None: mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, psutil.Process(proc.pid).num_threads()) - time.sleep(interval) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) proc.poll() proc.communicate() result['stdout'] = [] diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index af96a1e102..afdbef6936 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -78,7 +78,8 @@ class NonDaemonPool(pool.Pool): import numpy as np from copy import deepcopy -from ..engine import (MapNode, str2bool) +from ..engine import MapNode +from ...utils.misc import str2bool import datetime import psutil from ... import logging @@ -130,9 +131,9 @@ def __init__(self, plugin_args=None): if non_daemon: # run the execution using the non-daemon pool subclass - self.pool = NonDaemonPool(processes=n_procs) + self.pool = NonDaemonPool(processes=self.processors) else: - self.pool = Pool(processes=n_procs) + self.pool = Pool(processes=self.processors) def _wait(self): if len(self.pending_tasks) > 0: From 97e7333ef61317de1c20fb74c4713f6ce4206388 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 3 Feb 2016 15:49:12 -0500 Subject: [PATCH 07/78] Manual merge of s3_datasink and resource_multiproc branch for cpac run --- nipype/interfaces/base.py | 73 +++++- nipype/interfaces/utility.py | 10 +- nipype/pipeline/engine/tests/test_engine.py | 4 +- nipype/pipeline/engine/tests/test_utils.py | 2 +- nipype/pipeline/plugins/__init__.py | 4 +- nipype/pipeline/plugins/base.py | 17 +- nipype/pipeline/plugins/multiproc.py | 233 +++++++++++++++--- nipype/pipeline/plugins/tests/test_base.py | 2 +- .../pipeline/plugins/tests/test_callback.py | 4 +- .../pipeline/plugins/tests/test_multiproc.py | 189 +++++++++++++- .../plugins/tests/test_multiproc_nondaemon.py | 4 +- 11 files changed, 490 insertions(+), 52 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index e831fc67ce..1404110bf1 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -764,6 +764,8 @@ def __init__(self, **inputs): raise Exception('No input_spec in class: %s' % self.__class__.__name__) self.inputs = self.input_spec(**inputs) + self.estimated_memory = 1 + self.num_threads = 1 @classmethod def help(cls, returnhelp=False): @@ -1202,14 +1204,43 @@ def _read(self, drain): self._lastidx = len(self._rows) +def _get_num_threads(proc): + ''' + ''' + + # Import packages + import psutil + + # Init variables + num_threads = proc.num_threads() + try: + for child in proc.children(): + num_threads = max(num_threads, child.num_threads(), + len(child.children()), _get_num_threads(child)) + except psutil.NoSuchProcess: + dummy = 1 + + return num_threads + + def run_command(runtime, output=None, timeout=0.01, redirect_x=False): """Run a command, read stdout and stderr, prefix with timestamp. The returned runtime contains a merged stdout+stderr log with timestamps """ - PIPE = subprocess.PIPE + # Import packages + try: + from memory_profiler import _get_memory + import psutil + mem_proc = True + except: + mem_prof = False + + # Init variables + PIPE = subprocess.PIPE cmdline = runtime.cmdline + if redirect_x: exist_xvfb, _ = _exists_in_path('xvfb-run', runtime.environ) if not exist_xvfb: @@ -1238,6 +1269,12 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): result = {} errfile = os.path.join(runtime.cwd, 'stderr.nipype') outfile = os.path.join(runtime.cwd, 'stdout.nipype') + + # Init variables for memory profiling + mem_mb = -1 + num_threads = -1 + interval = 1 + if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] @@ -1253,8 +1290,10 @@ def _process(drain=0): else: for stream in res[0]: stream.read(drain) - while proc.returncode is None: + if mem_prof: + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) proc.poll() _process() _process(drain=1) @@ -1268,16 +1307,34 @@ def _process(drain=0): result[stream._name] = [r[2] for r in rows] temp.sort() result['merged'] = [r[1] for r in temp] + if output == 'allatonce': + if mem_prof: + while proc.returncode is None: + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) + proc.poll() stdout, stderr = proc.communicate() if stdout and isinstance(stdout, bytes): - stdout = stdout.decode() + try: + stdout = stdout.decode() + except UnicodeDecodeError: + stdout = stdout.decode("ISO-8859-1") if stderr and isinstance(stderr, bytes): - stderr = stderr.decode() + try: + stderr = stderr.decode() + except UnicodeDecodeError: + stdout = stdout.decode("ISO-8859-1") + result['stdout'] = str(stdout).split('\n') result['stderr'] = str(stderr).split('\n') result['merged'] = '' if output == 'file': + if mem_prof: + while proc.returncode is None: + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) + proc.poll() ret_code = proc.wait() stderr.flush() stdout.flush() @@ -1285,10 +1342,18 @@ def _process(drain=0): result['stderr'] = [line.strip() for line in open(errfile).readlines()] result['merged'] = '' if output == 'none': + if mem_prof: + while proc.returncode is None: + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) + proc.poll() proc.communicate() result['stdout'] = [] result['stderr'] = [] result['merged'] = '' + + setattr(runtime, 'cmd_memory', mem_mb/1024.0) + setattr(runtime, 'cmd_threads', num_threads) runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) runtime.merged = result['merged'] diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 37883d4e5c..2eb5c78fe5 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -449,7 +449,15 @@ def _run_interface(self, runtime): if isdefined(value): args[name] = value - out = function_handle(**args) + # Record memory of function_handle + try: + import memory_profiler + proc = (function_handle, (), args) + mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) + setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) + # If no memory_profiler package, run without recording memory + except: + out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 5eaaa81fbf..ce618abf27 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -723,7 +723,7 @@ def func1(in1): # test running the workflow on default conditions error_raised = False try: - w1.run(plugin='MultiProc') + w1.run(plugin='ResourceMultiProc') except Exception as e: from nipype.pipeline.engine.base import logger logger.info('Exception: %s' % str(e)) @@ -737,7 +737,7 @@ def func1(in1): # test running the workflow on serial conditions error_raised = False try: - w1.run(plugin='MultiProc') + w1.run(plugin='ResourceMultiProc') except Exception as e: from nipype.pipeline.engine.base import logger logger.info('Exception: %s' % str(e)) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 8420f587c2..9688e02395 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -214,7 +214,7 @@ def test_function3(arg): out_dir = mkdtemp() - for plugin in ('Linear',): # , 'MultiProc'): + for plugin in ('Linear',): # , 'ResourceMultiProc'): n1 = pe.Node(niu.Function(input_names=['arg1'], output_names=['out_file1', 'out_file2', 'dir'], function=test_function), diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py index 26d1577f55..643d5735f8 100644 --- a/nipype/pipeline/plugins/__init__.py +++ b/nipype/pipeline/plugins/__init__.py @@ -9,7 +9,7 @@ from .sge import SGEPlugin from .condor import CondorPlugin from .dagman import CondorDAGManPlugin -from .multiproc import MultiProcPlugin +from .multiproc import ResourceMultiProcPlugin from .ipython import IPythonPlugin from .somaflow import SomaFlowPlugin from .pbsgraph import PBSGraphPlugin @@ -17,3 +17,5 @@ from .lsf import LSFPlugin from .slurm import SLURMPlugin from .slurmgraph import SLURMGraphPlugin + +from .callback_log import log_nodes_cb diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 162ddd9df4..092c1883f1 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -20,7 +20,6 @@ import numpy as np import scipy.sparse as ssp - from ...utils.filemanip import savepkl, loadpkl from ...utils.misc import str2bool from ..engine.utils import (nx, dfs_preorder, topological_sort) @@ -246,7 +245,7 @@ def run(self, graph, config, updatehash=False): notrun.append(self._clean_queue(jobid, graph, result=result)) else: - self._task_finished_cb(jobid) + self._task_finished_cb(jobid, result) self._remove_node_dirs() self._clear_task(taskid) else: @@ -265,10 +264,15 @@ def run(self, graph, config, updatehash=False): graph=graph) else: logger.debug('Not submitting') - sleep(float(self._config['execution']['poll_sleep_duration'])) + self._wait() self._remove_node_dirs() report_nodes_not_run(notrun) + + + def _wait(self): + sleep(float(self._config['execution']['poll_sleep_duration'])) + def _get_result(self, taskid): raise NotImplementedError @@ -410,7 +414,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): else: break - def _task_finished_cb(self, jobid): + def _task_finished_cb(self, jobid, result=None): """ Extract outputs and assign to inputs of dependent tasks This is called when a job is completed. @@ -418,7 +422,10 @@ def _task_finished_cb(self, jobid): logger.info('[Job finished] jobname: %s jobid: %d' % (self.procs[jobid]._id, jobid)) if self._status_callback: - self._status_callback(self.procs[jobid], 'end') + if result == None: + if self._taskresult.has_key(jobid): + result = self._taskresult[jobid].get() + self._status_callback(self.procs[jobid], 'end', result) # Update job and worker queues self.proc_pending[jobid] = False # update the job dependency structure diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 861e2cc507..2e446ced57 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -9,18 +9,60 @@ from multiprocessing import Process, Pool, cpu_count, pool from traceback import format_exception import sys - +import numpy as np +from copy import deepcopy +from ..engine import MapNode +from ...utils.misc import str2bool +import datetime +import psutil +from ... import logging +import semaphore_singleton from .base import (DistributedPluginBase, report_crash) -def run_node(node, updatehash): - result = dict(result=None, traceback=None) +# Run node +def run_node(node, updatehash, plugin_args=None): + """docstring + """ + + # Import packages try: - result['result'] = node.run(updatehash=updatehash) - except: - etype, eval, etr = sys.exc_info() - result['traceback'] = format_exception(etype, eval, etr) - result['result'] = node.result + runtime_profile = plugin_args['runtime_profile'] + import memory_profiler + import datetime + except KeyError: + runtime_profile = False + except ImportError: + runtime_profile = False + + # Init variables + result = dict(result=None, traceback=None) + + # If we're profiling the run + if runtime_profile: + try: + # Init function tuple + proc = (node.run, (), {'updatehash' : updatehash}) + start = datetime.datetime.now() + mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) + runtime = (datetime.datetime.now() - start).total_seconds() + result['result'] = retval + result['node_memory'] = mem_mb[0]/1024.0 + result['cmd_memory'] = retval.runtime.get('cmd_memory') + result['cmd_threads'] = retval.runtime.get('cmd_threads') + result['run_seconds'] = runtime + except: + etype, eval, etr = sys.exc_info() + result['traceback'] = format_exception(etype,eval,etr) + result['result'] = node.result + # Otherwise, execute node.run as normal + else: + try: + result['result'] = node.run(updatehash=updatehash) + except: + etype, eval, etr = sys.exc_info() + result['traceback'] = format_exception(etype,eval,etr) + result['result'] = node.result return result @@ -41,34 +83,62 @@ class NonDaemonPool(pool.Pool): """ Process = NonDaemonProcess +logger = logging.getLogger('workflow') -class MultiProcPlugin(DistributedPluginBase): - """Execute workflow with multiprocessing +def release_lock(args): + semaphore_singleton.semaphore.release() + +class ResourceMultiProcPlugin(DistributedPluginBase): + """Execute workflow with multiprocessing, not sending more jobs at once + than the system can support. The plugin_args input to run can be used to control the multiprocessing - execution. Currently supported options are: + execution and defining the maximum amount of memory and threads that + should be used. When those parameters are not specified, + the number of threads and memory of the system is used. + + System consuming nodes should be tagged: + memory_consuming_node.interface.estimated_memory = 8 #Gb + thread_consuming_node.interface.num_threads = 16 + + The default number of threads and memory for a node is 1. + + Currently supported options are: - - n_procs : number of processes to use - non_daemon : boolean flag to execute as non-daemon processes + - num_threads: maximum number of threads to be executed in parallel + - estimated_memory: maximum memory that can be used at once. """ def __init__(self, plugin_args=None): - super(MultiProcPlugin, self).__init__(plugin_args=plugin_args) + super(ResourceMultiProcPlugin, self).__init__(plugin_args=plugin_args) self._taskresult = {} self._taskid = 0 non_daemon = True - n_procs = cpu_count() - if plugin_args: - if 'n_procs' in plugin_args: - n_procs = plugin_args['n_procs'] - if 'non_daemon' in plugin_args: + self.plugin_args = plugin_args + self.processors = cpu_count() + memory = psutil.virtual_memory() + self.memory = memory.total / (1024*1024*1024) + if self.plugin_args: + if 'non_daemon' in self.plugin_args: non_daemon = plugin_args['non_daemon'] + if 'n_procs' in self.plugin_args: + self.processors = self.plugin_args['n_procs'] + if 'memory' in self.plugin_args: + self.memory = self.plugin_args['memory'] + if non_daemon: # run the execution using the non-daemon pool subclass - self.pool = NonDaemonPool(processes=n_procs) + self.pool = NonDaemonPool(processes=self.processors) else: - self.pool = Pool(processes=n_procs) + self.pool = Pool(processes=self.processors) + + def _wait(self): + if len(self.pending_tasks) > 0: + semaphore_singleton.semaphore.acquire() + semaphore_singleton.semaphore.release() + def _get_result(self, taskid): if taskid not in self._taskresult: @@ -77,17 +147,6 @@ def _get_result(self, taskid): return None return self._taskresult[taskid].get() - def _submit_job(self, node, updatehash=False): - self._taskid += 1 - try: - if node.inputs.terminal_output == 'stream': - node.inputs.terminal_output = 'allatonce' - except: - pass - self._taskresult[self._taskid] = self.pool.apply_async(run_node, - (node, - updatehash,)) - return self._taskid def _report_crash(self, node, result=None): if result and result['traceback']: @@ -100,3 +159,115 @@ def _report_crash(self, node, result=None): def _clear_task(self, taskid): del self._taskresult[taskid] + + def _submit_job(self, node, updatehash=False): + self._taskid += 1 + try: + if node.inputs.terminal_output == 'stream': + node.inputs.terminal_output = 'allatonce' + except: + pass + self._taskresult[self._taskid] = self.pool.apply_async(run_node, + (node, updatehash, self.plugin_args), + callback=release_lock) + return self._taskid + + def _send_procs_to_workers(self, updatehash=False, graph=None): + """ Sends jobs to workers when system resources are available. + Check memory (gb) and cores usage before running jobs. + """ + executing_now = [] + + # Check to see if a job is available + jobids = np.flatnonzero((self.proc_pending == True) & (self.depidx.sum(axis=0) == 0).__array__()) + + #check available system resources by summing all threads and memory used + busy_memory = 0 + busy_processors = 0 + for jobid in jobids: + busy_memory+= self.procs[jobid]._interface.estimated_memory + busy_processors+= self.procs[jobid]._interface.num_threads + + free_memory = self.memory - busy_memory + free_processors = self.processors - busy_processors + + + #check all jobs without dependency not run + jobids = np.flatnonzero((self.proc_done == False) & (self.depidx.sum(axis=0) == 0).__array__()) + + + #sort jobs ready to run first by memory and then by number of threads + #The most resource consuming jobs run first + jobids = sorted(jobids, key=lambda item: (self.procs[item]._interface.estimated_memory, self.procs[item]._interface.num_threads)) + + logger.debug('Free memory: %d, Free processors: %d', free_memory, free_processors) + + + #while have enough memory and processors for first job + #submit first job on the list + for jobid in jobids: + logger.debug('Next Job: %d, memory: %d, threads: %d' %(jobid, self.procs[jobid]._interface.estimated_memory, self.procs[jobid]._interface.num_threads)) + + if self.procs[jobid]._interface.estimated_memory <= free_memory and self.procs[jobid]._interface.num_threads <= free_processors: + logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid)) + executing_now.append(self.procs[jobid]) + + if isinstance(self.procs[jobid], MapNode): + try: + num_subnodes = self.procs[jobid].num_subnodes() + except Exception: + self._clean_queue(jobid, graph) + self.proc_pending[jobid] = False + continue + if num_subnodes > 1: + submit = self._submit_mapnode(jobid) + if not submit: + continue + + # change job status in appropriate queues + self.proc_done[jobid] = True + self.proc_pending[jobid] = True + + free_memory -= self.procs[jobid]._interface.estimated_memory + free_processors -= self.procs[jobid]._interface.num_threads + + # Send job to task manager and add to pending tasks + if self._status_callback: + self._status_callback(self.procs[jobid], 'start') + if str2bool(self.procs[jobid].config['execution']['local_hash_check']): + logger.debug('checking hash locally') + try: + hash_exists, _, _, _ = self.procs[ + jobid].hash_exists() + logger.debug('Hash exists %s' % str(hash_exists)) + if (hash_exists and (self.procs[jobid].overwrite == False or (self.procs[jobid].overwrite == None and not self.procs[jobid]._interface.always_run))): + self._task_finished_cb(jobid) + self._remove_node_dirs() + continue + except Exception: + self._clean_queue(jobid, graph) + self.proc_pending[jobid] = False + continue + logger.debug('Finished checking hash') + + if self.procs[jobid].run_without_submitting: + logger.debug('Running node %s on master thread' %self.procs[jobid]) + try: + self.procs[jobid].run() + except Exception: + self._clean_queue(jobid, graph) + self._task_finished_cb(jobid) + self._remove_node_dirs() + + else: + logger.debug('submitting', jobid) + tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) + if tid is None: + self.proc_done[jobid] = False + self.proc_pending[jobid] = False + else: + self.pending_tasks.insert(0, (tid, jobid)) + else: + break + + logger.debug('No jobs waiting to execute') diff --git a/nipype/pipeline/plugins/tests/test_base.py b/nipype/pipeline/plugins/tests/test_base.py index 243ae195c2..616cb634a0 100644 --- a/nipype/pipeline/plugins/tests/test_base.py +++ b/nipype/pipeline/plugins/tests/test_base.py @@ -38,5 +38,5 @@ def func(arg1): wf.add_nodes([funkynode]) wf.base_dir = '/tmp' -wf.run(plugin='MultiProc') +wf.run(plugin='ResourceMultiProc') ''' diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index db02bc889b..036fd76090 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -76,7 +76,7 @@ def test_callback_multiproc_normal(): wf.add_nodes([f_node]) wf.config['execution']['crashdump_dir'] = wf.base_dir wf.config['execution']['poll_sleep_duration'] = 2 - wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) + wf.run(plugin='ResourceMultiProc', plugin_args={'status_callback': so.callback}) assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' @@ -95,7 +95,7 @@ def test_callback_multiproc_exception(): wf.config['execution']['crashdump_dir'] = wf.base_dir wf.config['execution']['poll_sleep_duration'] = 2 try: - wf.run(plugin='MultiProc', + wf.run(plugin='ResourceMultiProc', plugin_args={'status_callback': so.callback}) except: pass diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index efa9ec4161..ed101db7bf 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -3,7 +3,7 @@ from tempfile import mkdtemp from shutil import rmtree -from nipype.testing import assert_equal +from nipype.testing import assert_equal, assert_less_equal import nipype.pipeline.engine as pe @@ -44,10 +44,195 @@ def test_run_multiproc(): pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 pipe.config['execution']['poll_sleep_duration'] = 2 - execgraph = pipe.run(plugin="MultiProc") + execgraph = pipe.run(plugin="ResourceMultiProc") names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') yield assert_equal, result, [1, 1] os.chdir(cur_dir) rmtree(temp_dir) + + +################################ + + +class InputSpecSingleNode(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + +class OutputSpecSingleNode(nib.TraitedSpec): + output1 = nib.traits.Int(desc='a random int') + + +class TestInterfaceSingleNode(nib.BaseInterface): + input_spec = InputSpecSingleNode + output_spec = OutputSpecSingleNode + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = self.inputs.input1 + return outputs + + +def find_metrics(nodes, last_node): + import json + from dateutil.parser import parse + from datetime import datetime + import datetime as d + + + start = parse(nodes[0]['start']) + total_duration = int((parse(last_node['finish']) - start).total_seconds()) + + total_memory = [] + total_threads = [] + for i in range(total_duration): + total_memory.append(0) + total_threads.append(0) + + now = start + for i in range(total_duration): + start_index = 0 + node_start = None + node_finish = None + + x = now + + for j in range(start_index, len(nodes)): + node_start = parse(nodes[j]['start']) + node_finish = parse(nodes[j]['finish']) + + if node_start < x and node_finish > x: + total_memory[i] += nodes[j]['estimated_memory'] + total_threads[i] += nodes[j]['num_threads'] + start_index = j + + if node_start > x: + break + + now += d.timedelta(seconds=1) + + return total_memory, total_threads + + +import os +from nipype.pipeline.plugins.callback_log import log_nodes_cb +import logging +import logging.handlers +import psutil +from multiprocessing import cpu_count + +from nipype.utils import draw_gantt_chart + +def test_do_not_use_more_memory_then_specified(): + LOG_FILENAME = 'callback.log' + my_logger = logging.getLogger('callback') + my_logger.setLevel(logging.DEBUG) + + # Add the log message handler to the logger + handler = logging.FileHandler(LOG_FILENAME) + my_logger.addHandler(handler) + + max_memory = 10 + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') + n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') + n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') + n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') + + n1.interface.estimated_memory = 1 + n2.interface.estimated_memory = 1 + n3.interface.estimated_memory = 10 + n4.interface.estimated_memory = 1 + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 10 + + pipe.run(plugin='ResourceMultiProc', plugin_args={'memory': max_memory, + 'status_callback': log_nodes_cb}) + + + nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) + #usage in every second + memory, threads = find_metrics(nodes, last_node) + + result = True + for m in memory: + if m > max_memory: + result = False + break + + yield assert_equal, result, True + + max_threads = cpu_count() + + result = True + for t in threads: + if t > max_threads: + result = False + break + + yield assert_equal, result, True, "using more threads than system has (threads is not specified by user)" + + os.remove(LOG_FILENAME) + + + + +def test_do_not_use_more_threads_then_specified(): + LOG_FILENAME = 'callback.log' + my_logger = logging.getLogger('callback') + my_logger.setLevel(logging.DEBUG) + + # Add the log message handler to the logger + handler = logging.FileHandler(LOG_FILENAME) + my_logger.addHandler(handler) + + max_threads = 10 + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') + n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') + n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') + n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') + + n1.interface.num_threads = 1 + n2.interface.num_threads = 1 + n3.interface.num_threads = 10 + n4.interface.num_threads = 1 + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 10 + pipe.config['execution']['poll_sleep_duration'] = 1 + pipe.run(plugin='ResourceMultiProc', plugin_args={'n_procs': max_threads, 'status_callback': log_nodes_cb}) + + nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) + #usage in every second + memory, threads = find_metrics(nodes, last_node) + + result = True + for t in threads: + if t > max_threads: + result = False + break + + yield assert_equal, result, True, "using more threads than specified" + + max_memory = psutil.virtual_memory().total / (1024*1024) + result = True + for m in memory: + if m > max_memory: + result = False + break + yield assert_equal, result, True, "using more memory than system has (memory is not specified by user)" + + os.remove(LOG_FILENAME) diff --git a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py index 89336c2026..427f5f02fe 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py @@ -84,7 +84,7 @@ def dummyFunction(filename): def run_multiproc_nondaemon_with_flag(nondaemon_flag): ''' - Start a pipe with two nodes using the multiproc plugin and passing the nondaemon_flag. + Start a pipe with two nodes using the resource multiproc plugin and passing the nondaemon_flag. ''' cur_dir = os.getcwd() @@ -111,7 +111,7 @@ def run_multiproc_nondaemon_with_flag(nondaemon_flag): # execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag # to enable child processes which start other multiprocessing jobs - execgraph = pipe.run(plugin="MultiProc", + execgraph = pipe.run(plugin="ResourceMultiProc", plugin_args={'n_procs': 2, 'non_daemon': nondaemon_flag}) From 08a485d634867c9030c77e3e9474c8d3740041f0 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 3 Feb 2016 16:04:16 -0500 Subject: [PATCH 08/78] Manual merge of s3_datasink and resource_multiproc branch for cpac run --- nipype/interfaces/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 1404110bf1..1f80b62b63 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1233,7 +1233,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): try: from memory_profiler import _get_memory import psutil - mem_proc = True + mem_prof = True except: mem_prof = False From e5945e9b5ccfbdded9ed78c029a64045cfbbeed0 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 3 Feb 2016 17:54:08 -0500 Subject: [PATCH 09/78] Changed resources fetching to its function and try-blocked it in case of dying processes --- nipype/interfaces/base.py | 46 ++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 1f80b62b63..726d93cf2d 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1204,6 +1204,7 @@ def _read(self, drain): self._lastidx = len(self._rows) +# Get number of threads for process def _get_num_threads(proc): ''' ''' @@ -1223,6 +1224,29 @@ def _get_num_threads(proc): return num_threads +# Get max resources used for process +def _get_max_resources_used(proc, mem_mb, num_threads, poll=False): + ''' + docstring + ''' + + # Import packages + from memory_profiler import _get_memory + import psutil + + try: + mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) + if poll: + proc.poll() + except Exception as exc: + iflogger.info('Could not get resources used by process. Error: %s'\ + % exc) + + # Return resources + return mem_mb, num_threads + + def run_command(runtime, output=None, timeout=0.01, redirect_x=False): """Run a command, read stdout and stderr, prefix with timestamp. @@ -1231,7 +1255,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Import packages try: - from memory_profiler import _get_memory + import memory_profiler import psutil mem_prof = True except: @@ -1273,7 +1297,6 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Init variables for memory profiling mem_mb = -1 num_threads = -1 - interval = 1 if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] @@ -1292,8 +1315,8 @@ def _process(drain=0): stream.read(drain) while proc.returncode is None: if mem_prof: - mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) + mem_mb, num_threads = \ + _get_max_resources_used(proc, mem_mb, num_threads) proc.poll() _process() _process(drain=1) @@ -1311,9 +1334,8 @@ def _process(drain=0): if output == 'allatonce': if mem_prof: while proc.returncode is None: - mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) - proc.poll() + mem_mb, num_threads = \ + _get_max_resources_used(proc, mem_mb, num_threads, poll=True) stdout, stderr = proc.communicate() if stdout and isinstance(stdout, bytes): try: @@ -1332,9 +1354,8 @@ def _process(drain=0): if output == 'file': if mem_prof: while proc.returncode is None: - mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) - proc.poll() + mem_mb, num_threads = \ + _get_max_resources_used(proc, mem_mb, num_threads, poll=True) ret_code = proc.wait() stderr.flush() stdout.flush() @@ -1344,9 +1365,8 @@ def _process(drain=0): if output == 'none': if mem_prof: while proc.returncode is None: - mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) - proc.poll() + mem_mb, num_threads = \ + _get_max_resources_used(proc, mem_mb, num_threads, poll=True) proc.communicate() result['stdout'] = [] result['stderr'] = [] From 9cb7a68ca739aa8a8126545da9db5f39426e45d5 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 4 Feb 2016 14:11:16 -0500 Subject: [PATCH 10/78] Fixed pickling bug of instance method by passing profiling flag instead of complete plugin_args dict --- nipype/interfaces/utility.py | 14 ++++++------ nipype/pipeline/plugins/base.py | 2 +- nipype/pipeline/plugins/multiproc.py | 22 ++++++++++--------- .../pipeline/plugins/tests/test_callback.py | 6 ++++- 4 files changed, 25 insertions(+), 19 deletions(-) diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 2eb5c78fe5..39784d10c5 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -450,14 +450,14 @@ def _run_interface(self, runtime): args[name] = value # Record memory of function_handle - try: - import memory_profiler - proc = (function_handle, (), args) - mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) - setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) + #try: + # import memory_profiler + # proc = (function_handle, (), args) + # mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) + # setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) # If no memory_profiler package, run without recording memory - except: - out = function_handle(**args) + #except: + out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 092c1883f1..48d62aa49b 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -250,7 +250,7 @@ def run(self, graph, config, updatehash=False): self._clear_task(taskid) else: toappend.insert(0, (taskid, jobid)) - except Exception: + except Exception as exc: result = {'result': None, 'traceback': format_exc()} notrun.append(self._clean_queue(jobid, graph, diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 2e446ced57..1726efd480 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -21,23 +21,20 @@ # Run node -def run_node(node, updatehash, plugin_args=None): +def run_node(node, updatehash, runtime_profile=False): """docstring """ - + # Import packages try: - runtime_profile = plugin_args['runtime_profile'] import memory_profiler import datetime - except KeyError: - runtime_profile = False except ImportError: runtime_profile = False - + # Init variables result = dict(result=None, traceback=None) - + runtime_profile = False # If we're profiling the run if runtime_profile: try: @@ -167,9 +164,14 @@ def _submit_job(self, node, updatehash=False): node.inputs.terminal_output = 'allatonce' except: pass - self._taskresult[self._taskid] = self.pool.apply_async(run_node, - (node, updatehash, self.plugin_args), - callback=release_lock) + try: + runtime_profile = self.plugin_args['runtime_profile'] + except: + runtime_profile = False + self._taskresult[self._taskid] = \ + self.pool.apply_async(run_node, + (node, updatehash, runtime_profile), + callback=release_lock) return self._taskid def _send_procs_to_workers(self, updatehash=False, graph=None): diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index 036fd76090..267b4e99c9 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -26,7 +26,7 @@ class Status(object): def __init__(self): self.statuses = [] - def callback(self, node, status): + def callback(self, node, status, result=None): self.statuses.append((node, status)) @@ -105,3 +105,7 @@ def test_callback_multiproc_exception(): yield assert_equal, so.statuses[0][1], 'start' yield assert_equal, so.statuses[1][1], 'exception' rmtree(wf.base_dir) + +if __name__ == '__main__': + import nose + nose.run() From fe0a35203f2f74efb293442d49256b56dd87b3cb Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 4 Feb 2016 14:29:38 -0500 Subject: [PATCH 11/78] Merged resource_multiproc into s3_multiproc --- nipype/pipeline/plugins/semaphore_singleton.py | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 nipype/pipeline/plugins/semaphore_singleton.py diff --git a/nipype/pipeline/plugins/semaphore_singleton.py b/nipype/pipeline/plugins/semaphore_singleton.py new file mode 100644 index 0000000000..99c7752b82 --- /dev/null +++ b/nipype/pipeline/plugins/semaphore_singleton.py @@ -0,0 +1,2 @@ +import threading +semaphore = threading.Semaphore(1) From 5733af9ac7ac8537760a9f602c4ef42c913e3966 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 4 Feb 2016 14:53:12 -0500 Subject: [PATCH 12/78] Fixed hsarc related to yrt blocking --- nipype/interfaces/tests/test_io.py | 2 +- nipype/interfaces/utility.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 6d60dfd951..c1f4ec35f5 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -113,7 +113,7 @@ def test_selectfiles_valueerror(): yield assert_raises, ValueError, sf.run -@skip +@skipif(noboto) def test_s3datagrabber_communication(): dg = nio.S3DataGrabber( infields=['subj_id', 'run_num'], outfields=['func', 'struct']) diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 738cafff0e..5729d9e677 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -450,14 +450,14 @@ def _run_interface(self, runtime): args[name] = value # Record memory of function_handle - try: - import memory_profiler - proc = (function_handle, (), args) - mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) - setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) - # If no memory_profiler package, run without recording memory - except ImportError: - out = function_handle(**args) + #try: + # import memory_profiler + # proc = (function_handle, (), args) + # mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) + # setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) + ## If no memory_profiler package, run without recording memory + #except: + out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out From 544dddf632f4c9343befb2f2a03c631098a9f68a Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 4 Feb 2016 16:25:30 -0500 Subject: [PATCH 13/78] Removed forcing of runtime_profile to be off: --- nipype/pipeline/plugins/multiproc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 1ee3a9b81d..fa43b1ccd3 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -34,7 +34,6 @@ def run_node(node, updatehash, runtime_profile=False): # Init variables result = dict(result=None, traceback=None) - runtime_profile = False # If we're profiling the run if runtime_profile: From c07429983f85122ceb95c35e8ff81f397a299352 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 4 Feb 2016 17:18:41 -0500 Subject: [PATCH 14/78] Made when result is None that the end stats are N/A --- nipype/pipeline/plugins/callback_log.py | 6 +++--- nipype/pipeline/plugins/multiproc.py | 6 ++++-- nipype/pipeline/plugins/tests/test_callback.py | 4 ---- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index 854a217957..44b2455b79 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -3,13 +3,13 @@ def log_nodes_cb(node, status, result=None): logger = logging.getLogger('callback') - try: + if result is None: + node_mem = cmd_mem = run_seconds = cmd_threads = 'N/A' + else: node_mem = result['node_memory'] cmd_mem = result['cmd_memory'] run_seconds = result['run_seconds'] cmd_threads = result['cmd_threads'] - except Exception as exc: - node_mem = cmd_mem = run_seconds = cmd_threads = 'N/A' if status == 'start': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' +\ node._id + '"' + ',"start":' + '"' +str(datetime.datetime.now()) +\ diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index fa43b1ccd3..bd68f72ade 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -41,13 +41,15 @@ def run_node(node, updatehash, runtime_profile=False): # Init function tuple proc = (node.run, (), {'updatehash' : updatehash}) start = datetime.datetime.now() - mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) + mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, + include_children=True, + max_usage=True) runtime = (datetime.datetime.now() - start).total_seconds() result['result'] = retval result['node_memory'] = mem_mb[0]/1024.0 + result['run_seconds'] = runtime result['cmd_memory'] = retval.runtime.get('cmd_memory') result['cmd_threads'] = retval.runtime.get('cmd_threads') - result['run_seconds'] = runtime except: etype, eval, etr = sys.exc_info() result['traceback'] = format_exception(etype,eval,etr) diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index 2de3a880d9..f173a9b30c 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -104,7 +104,3 @@ def test_callback_multiproc_exception(): yield assert_equal, so.statuses[0][1], 'start' yield assert_equal, so.statuses[1][1], 'exception' rmtree(wf.base_dir) - -if __name__ == '__main__': - import nose - nose.run() From a4e3ae69c8821bb7c6b6f13b51bc52c0048fd161 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 4 Feb 2016 17:56:18 -0500 Subject: [PATCH 15/78] Added try-blocks around the runtime profile stats in callback logger --- nipype/pipeline/plugins/callback_log.py | 33 +++++++++++++++---- .../pipeline/plugins/tests/test_multiproc.py | 3 +- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index 44b2455b79..9495e77410 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -2,14 +2,35 @@ import logging def log_nodes_cb(node, status, result=None): + ''' + ''' + + # Init variables logger = logging.getLogger('callback') + + # Check runtime profile stats if result is None: node_mem = cmd_mem = run_seconds = cmd_threads = 'N/A' else: - node_mem = result['node_memory'] - cmd_mem = result['cmd_memory'] - run_seconds = result['run_seconds'] - cmd_threads = result['cmd_threads'] + try: + node_mem = result['node_memory'] + except KeyError: + node_mem = 'Unknown' + try: + cmd_mem = result['cmd_memory'] + except KeyError: + cmd_mem = 'Unknown' + try: + run_seconds = result['run_seconds'] + except KeyError: + run_seconds = 'Unknown' + try: + cmd_threads = result['cmd_threads'] + except: + cmd_threads = 'Unknown' + + # Check status and write to log + # Start if status == 'start': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' +\ node._id + '"' + ',"start":' + '"' +str(datetime.datetime.now()) +\ @@ -17,7 +38,7 @@ def log_nodes_cb(node, status, result=None): + str(node._interface.num_threads) + '}' logger.debug(message) - + # End elif status == 'end': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ @@ -29,7 +50,7 @@ def log_nodes_cb(node, status, result=None): ',"run_seconds":' + '"'+ str(run_seconds) + '"'+ '}' logger.debug(message) - + # Other else: message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) +\ diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index ed7483e772..cd41bbb695 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -212,7 +212,8 @@ def test_do_not_use_more_threads_then_specified(): pipe.connect(n3, 'output1', n4, 'input2') n1.inputs.input1 = 10 pipe.config['execution']['poll_sleep_duration'] = 1 - pipe.run(plugin='ResourceMultiProc', plugin_args={'n_procs': max_threads, 'status_callback': log_nodes_cb}) + pipe.run(plugin='ResourceMultiProc', plugin_args={'n_procs': max_threads, + 'status_callback': log_nodes_cb}) nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) #usage in every second From e25ac8cde62f2dec38f41d1aca40013b462e2ca2 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 5 Feb 2016 14:34:51 -0500 Subject: [PATCH 16/78] Cleaned up some code and removed recursion from get_num_threads --- nipype/interfaces/base.py | 8 +++++--- nipype/interfaces/utility.py | 8 -------- nipype/pipeline/plugins/multiproc.py | 6 +++--- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index b4c76ca782..ab27497bb4 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1211,15 +1211,17 @@ def _get_num_threads(proc): # Import packages import psutil + import logging as lg # Init variables num_threads = proc.num_threads() try: + num_children = len(proc.children()) for child in proc.children(): - num_threads = max(num_threads, child.num_threads(), - len(child.children()), _get_num_threads(child)) + num_threads = max(num_threads, num_children, + child.num_threads(), len(child.children())) except psutil.NoSuchProcess: - dummy = 1 + pass return num_threads diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 5729d9e677..37883d4e5c 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -449,14 +449,6 @@ def _run_interface(self, runtime): if isdefined(value): args[name] = value - # Record memory of function_handle - #try: - # import memory_profiler - # proc = (function_handle, (), args) - # mem_mb, out = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) - # setattr(runtime, 'cmd_memory', mem_mb[0]/1024.0) - ## If no memory_profiler package, run without recording memory - #except: out = function_handle(**args) if len(self._output_names) == 1: diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index bd68f72ade..60f235b5ab 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -44,10 +44,10 @@ def run_node(node, updatehash, runtime_profile=False): mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, max_usage=True) - runtime = (datetime.datetime.now() - start).total_seconds() + run_secs = (datetime.datetime.now() - start).total_seconds() result['result'] = retval result['node_memory'] = mem_mb[0]/1024.0 - result['run_seconds'] = runtime + result['run_seconds'] = run_secs result['cmd_memory'] = retval.runtime.get('cmd_memory') result['cmd_threads'] = retval.runtime.get('cmd_threads') except: @@ -118,7 +118,7 @@ def __init__(self, plugin_args=None): self.plugin_args = plugin_args self.processors = cpu_count() memory = psutil.virtual_memory() - self.memory = memory.total / (1024*1024*1024) + self.memory = float(memory.total) / (1024.0**3) if self.plugin_args: if 'non_daemon' in self.plugin_args: non_daemon = plugin_args['non_daemon'] From d714a0345434ec6660311e79dd4bde30ef6e8540 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 9 Feb 2016 16:57:17 -0500 Subject: [PATCH 17/78] Added check for runtime having 'get' attribute --- nipype/pipeline/plugins/multiproc.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 60f235b5ab..d529eb84c7 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -48,8 +48,9 @@ def run_node(node, updatehash, runtime_profile=False): result['result'] = retval result['node_memory'] = mem_mb[0]/1024.0 result['run_seconds'] = run_secs - result['cmd_memory'] = retval.runtime.get('cmd_memory') - result['cmd_threads'] = retval.runtime.get('cmd_threads') + if hasattr(retval.runtime, 'get'): + result['cmd_memory'] = retval.runtime.get('cmd_memory') + result['cmd_threads'] = retval.runtime.get('cmd_threads') except: etype, eval, etr = sys.exc_info() result['traceback'] = format_exception(etype,eval,etr) From 27ee192bc27f260935bab077a970cf325417553a Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 12 Feb 2016 12:10:01 -0500 Subject: [PATCH 18/78] Removed print statements --- nipype/pipeline/engine/nodes.py | 2 ++ nipype/pipeline/plugins/multiproc.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 9f9165e3b2..ce4c15278b 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -52,6 +52,7 @@ from ... import config, logging logger = logging.getLogger('workflow') + from ...interfaces.base import (traits, InputMultiPath, CommandLine, Undefined, TraitedSpec, DynamicTraitedSpec, Bunch, InterfaceResult, md5, Interface, @@ -670,6 +671,7 @@ def _copyfiles_to_wd(self, outdir, execute, linksonly=False): os.makedirs(outdir) for info in self._interface._get_filecopy_info(): files = self.inputs.get().get(info['key']) + print '######## files: %s' % (str(files)) if not isdefined(files): continue if files: diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index d529eb84c7..2ec3286bf8 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -265,7 +265,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self._remove_node_dirs() else: - logger.debug('submitting', jobid) + logger.debug('submitting %s' % str(jobid)) tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if tid is None: self.proc_done[jobid] = False From c99f834a0cd53a2ec8933444dbab7cd930da2262 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 12 Feb 2016 12:24:17 -0500 Subject: [PATCH 19/78] Removed more print statements and touched up some code to be more like nipy/master --- nipype/pipeline/engine/nodes.py | 1 - nipype/pipeline/engine/tests/test_engine.py | 3 ++- nipype/pipeline/plugins/base.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index ce4c15278b..63b9ae13f8 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -671,7 +671,6 @@ def _copyfiles_to_wd(self, outdir, execute, linksonly=False): os.makedirs(outdir) for info in self._interface._get_filecopy_info(): files = self.inputs.get().get(info['key']) - print '######## files: %s' % (str(files)) if not isdefined(files): continue if files: diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 2f829abcd4..09f3ec92c2 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -714,7 +714,8 @@ def func1(in1): # set local check w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'true', - 'crashdump_dir': wd} + 'crashdump_dir': wd, + 'poll_sleep_duration' : 2} # test output of num_subnodes method when serial is default (False) yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 48d62aa49b..092c1883f1 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -250,7 +250,7 @@ def run(self, graph, config, updatehash=False): self._clear_task(taskid) else: toappend.insert(0, (taskid, jobid)) - except Exception as exc: + except Exception: result = {'result': None, 'traceback': format_exc()} notrun.append(self._clean_queue(jobid, graph, From 07461cfe61a6c5d7bf7a5d9a9c1339018f999c24 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 12 Feb 2016 15:51:54 -0500 Subject: [PATCH 20/78] Added a fix for the recursive symlink bug (was happening because while loop in memory_profiler was executing node twice when it didnt finish running the first time --- nipype/pipeline/plugins/multiproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 2ec3286bf8..8f133faca0 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -43,7 +43,7 @@ def run_node(node, updatehash, runtime_profile=False): start = datetime.datetime.now() mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, include_children=True, - max_usage=True) + max_usage=True, interval=.9e-6) run_secs = (datetime.datetime.now() - start).total_seconds() result['result'] = retval result['node_memory'] = mem_mb[0]/1024.0 From 116a6a19d60e23fe831301b68381b99e3ac191c8 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 12 Feb 2016 17:34:32 -0500 Subject: [PATCH 21/78] Removed node.run level profiling --- nipype/pipeline/plugins/multiproc.py | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 8f133faca0..11aecb86e9 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -6,6 +6,7 @@ http://stackoverflow.com/a/8963618/1183453 """ +# Import packages from multiprocessing import Process, Pool, cpu_count, pool from traceback import format_exception import sys @@ -13,12 +14,13 @@ from copy import deepcopy from ..engine import MapNode from ...utils.misc import str2bool -import datetime import psutil from ... import logging import semaphore_singleton from .base import (DistributedPluginBase, report_crash) +# Init logger +logger = logging.getLogger('workflow') # Run node def run_node(node, updatehash, runtime_profile=False): @@ -26,11 +28,7 @@ def run_node(node, updatehash, runtime_profile=False): """ # Import packages - try: - import memory_profiler - import datetime - except ImportError: - runtime_profile = False + import datetime # Init variables result = dict(result=None, traceback=None) @@ -38,15 +36,10 @@ def run_node(node, updatehash, runtime_profile=False): # If we're profiling the run if runtime_profile: try: - # Init function tuple - proc = (node.run, (), {'updatehash' : updatehash}) start = datetime.datetime.now() - mem_mb, retval = memory_profiler.memory_usage(proc=proc, retval=True, - include_children=True, - max_usage=True, interval=.9e-6) + retval = node.run(updatehash=updatehash) run_secs = (datetime.datetime.now() - start).total_seconds() result['result'] = retval - result['node_memory'] = mem_mb[0]/1024.0 result['run_seconds'] = run_secs if hasattr(retval.runtime, 'get'): result['cmd_memory'] = retval.runtime.get('cmd_memory') @@ -83,11 +76,11 @@ class NonDaemonPool(pool.Pool): """ Process = NonDaemonProcess -logger = logging.getLogger('workflow') def release_lock(args): semaphore_singleton.semaphore.release() + class ResourceMultiProcPlugin(DistributedPluginBase): """Execute workflow with multiprocessing, not sending more jobs at once than the system can support. From c1376c404b4202e205936c10b6627c2edffc7c05 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 17 Feb 2016 13:28:18 -0500 Subject: [PATCH 22/78] Updated keyword in result dictionary to runtime instead of cmd-level --- nipype/interfaces/afni/__init__.py | 4 +- nipype/interfaces/afni/preprocess.py | 99 +++++++++++++++++++++++++ nipype/interfaces/base.py | 4 +- nipype/pipeline/plugins/callback_log.py | 25 +++---- nipype/pipeline/plugins/multiproc.py | 6 +- 5 files changed, 116 insertions(+), 22 deletions(-) diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index 4437a3ccd2..8cc9b34a50 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -8,8 +8,8 @@ from .base import Info from .preprocess import (To3D, Refit, Resample, TStat, Automask, Volreg, Merge, - ZCutUp, Calc, TShift, Warp, Detrend, Despike, Copy, - Fourier, Allineate, Maskave, SkullStrip, TCat, Fim, + ZCutUp, Calc, TShift, Warp, Detrend, Despike, DegreeCentrality, + Copy, Fourier, Allineate, Maskave, SkullStrip, TCat, Fim, BlurInMask, Autobox, TCorrMap, Bandpass, Retroicor, TCorrelate, TCorr1D, BrickStat, ROIStats, AutoTcorrelate, AFNItoNIFTI, Eval, Means) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 85f2a4eaf9..cdb02f6625 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -180,6 +180,7 @@ class RefitInputSpec(CommandLineInputSpec): ' template type, e.g. TLRC, MNI, ORIG') + class Refit(CommandLine): """Changes some of the information inside a 3D dataset's header @@ -506,6 +507,104 @@ class Despike(AFNICommand): output_spec = AFNICommandOutputSpec +class CentralityInputSpec(AFNICommandInputSpec): + """ + inherits the out_file parameter from AFNICommandOutputSpec base class + """ + + in_file = File(desc='input file to 3dDegreeCentrality', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + + mask = File(desc='mask file to mask input data', + argstr="-mask %s", + exists=True) + + thresh = traits.Float(desc='threshold to exclude connections where corr <= thresh', + argstr='-thresh %f') + + polort = traits.Int(desc='', argstr='-polort %d') + + autoclip = traits.Bool(desc='Clip off low-intensity regions in the dataset', + argstr='-autoclip') + + automask = traits.Bool(desc='Mask the dataset to target brain-only voxels', + argstr='-automask') + + +class DegreeCentralityInputSpec(CentralityInputSpec): + """ + inherits the out_file parameter from AFNICommandOutputSpec base class + """ + + in_file = File(desc='input file to 3dDegreeCentrality', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + + mask = File(desc='mask file to mask input data', + argstr="-mask %s", + exists=True) + + thresh = traits.Float(desc='threshold to exclude connections where corr <= thresh', + argstr='-thresh %f') + + sparsity = traits.Float(desc='only take the top percent of connections', + argstr='-sparsity %f') + + out_1d = traits.Str(desc='output filepath to text dump of correlation matrix', + argstr='-out1D') + + polort = traits.Int(desc='', argstr='-polort %d') + + autoclip = traits.Bool(desc='Clip off low-intensity regions in the dataset', + argstr='-autoclip') + + automask = traits.Bool(desc='Mask the dataset to target brain-only voxels', + argstr='-automask') + + +class DegreeCentralityOutputSpec(AFNICommandOutputSpec): + """ + inherits the out_file parameter from AFNICommandOutputSpec base class + """ + + one_d_file = File(desc='The text output of the similarity matrix computed'\ + 'after thresholding with one-dimensional and '\ + 'ijk voxel indices, correlations, image extents, '\ + 'and affine matrix') + + +class DegreeCentrality(AFNICommand): + """Performs degree centrality on a dataset using a given maskfile + via 3dDegreeCentrality + + For complete details, see the `3dDegreeCentrality Documentation. + + + Examples + ======== + + >>> from nipype.interfaces import afni as afni + >>> degree = afni.DegreeCentrality() + >>> degree.inputs.in_file = 'func_preproc.nii' + >>> degree.inputs.mask = 'mask.nii' + >>> degree.inputs.sparsity = 1 # keep the top one percent of connections + >>> degree.cmdline + '3dDegreeCentrality -sparsity 1 -mask mask.nii func_preproc.nii' + >>> res = degree.run() # doctest: +SKIP + """ + + _cmd = '3dDegreeCentrality' + input_spec = DegreeCentralityInputSpec + output_spec = DegreeCentralityOutputSpec + + class AutomaskInputSpec(AFNICommandInputSpec): in_file = File(desc='input file to 3dAutomask', argstr='%s', diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index ab27497bb4..14c57e406f 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1375,8 +1375,8 @@ def _process(drain=0): result['stderr'] = [] result['merged'] = '' - setattr(runtime, 'cmd_memory', mem_mb/1024.0) - setattr(runtime, 'cmd_threads', num_threads) + setattr(runtime, 'runtime_memory', mem_mb/1024.0) + setattr(runtime, 'runtime_threads', num_threads) runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) runtime.merged = result['merged'] diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index 9495e77410..548b98f342 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -10,24 +10,20 @@ def log_nodes_cb(node, status, result=None): # Check runtime profile stats if result is None: - node_mem = cmd_mem = run_seconds = cmd_threads = 'N/A' + runtime_memory = runtime_seconds = runtime_threads = 'N/A' else: try: - node_mem = result['node_memory'] + runtime_memory = result['runtime_memory'] except KeyError: - node_mem = 'Unknown' + runtime_memory = 'Unknown' try: - cmd_mem = result['cmd_memory'] + runtime_seconds = result['runtime_seconds'] except KeyError: - cmd_mem = 'Unknown' + runtime_seconds = 'Unknown' try: - run_seconds = result['run_seconds'] - except KeyError: - run_seconds = 'Unknown' - try: - cmd_threads = result['cmd_threads'] + runtime_threads = result['runtime_threads'] except: - cmd_threads = 'Unknown' + runtime_threads = 'Unknown' # Check status and write to log # Start @@ -44,10 +40,9 @@ def log_nodes_cb(node, status, result=None): node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ '"' + ',"estimated_memory":' + '"'+ str(node._interface.estimated_memory) + '"'+ \ ',"num_threads":' + '"'+ str(node._interface.num_threads) + '"'+ \ - ',"cmd-level_threads":' + '"'+ str(cmd_threads) + '"'+ \ - ',"node-level_memory":' + '"'+ str(node_mem) + '"'+ \ - ',"cmd-level_memory":' + '"'+ str(cmd_mem) + '"' + \ - ',"run_seconds":' + '"'+ str(run_seconds) + '"'+ '}' + ',"runtime_threads":' + '"'+ str(runtime_threads) + '"'+ \ + ',"runtime_memory":' + '"'+ str(runtime_memory) + '"' + \ + ',"runtime_seconds":' + '"'+ str(runtime_seconds) + '"'+ '}' logger.debug(message) # Other diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 11aecb86e9..b34f9944c5 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -40,10 +40,10 @@ def run_node(node, updatehash, runtime_profile=False): retval = node.run(updatehash=updatehash) run_secs = (datetime.datetime.now() - start).total_seconds() result['result'] = retval - result['run_seconds'] = run_secs + result['runtime_seconds'] = run_secs if hasattr(retval.runtime, 'get'): - result['cmd_memory'] = retval.runtime.get('cmd_memory') - result['cmd_threads'] = retval.runtime.get('cmd_threads') + result['runtime_memory'] = retval.runtime.get('runtime_memory') + result['runtime_threads'] = retval.runtime.get('runtime_threads') except: etype, eval, etr = sys.exc_info() result['traceback'] = format_exception(etype,eval,etr) From e3f54c117fc56209cdb431afb00653315ff6ee8e Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 18 Feb 2016 11:45:00 -0500 Subject: [PATCH 23/78] Removed afni centrality interface (will do that in another branch) --- nipype/interfaces/afni/__init__.py | 2 +- nipype/interfaces/afni/preprocess.py | 125 --------------------------- 2 files changed, 1 insertion(+), 126 deletions(-) diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index 8cc9b34a50..159acfeaf1 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -8,7 +8,7 @@ from .base import Info from .preprocess import (To3D, Refit, Resample, TStat, Automask, Volreg, Merge, - ZCutUp, Calc, TShift, Warp, Detrend, Despike, DegreeCentrality, + ZCutUp, Calc, TShift, Warp, Detrend, Despike, Copy, Fourier, Allineate, Maskave, SkullStrip, TCat, Fim, BlurInMask, Autobox, TCorrMap, Bandpass, Retroicor, TCorrelate, TCorr1D, BrickStat, ROIStats, AutoTcorrelate, diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index cdb02f6625..1d6fc8d1c5 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -507,131 +507,6 @@ class Despike(AFNICommand): output_spec = AFNICommandOutputSpec -class CentralityInputSpec(AFNICommandInputSpec): - """ - inherits the out_file parameter from AFNICommandOutputSpec base class - """ - - in_file = File(desc='input file to 3dDegreeCentrality', - argstr='%s', - position=-1, - mandatory=True, - exists=True, - copyfile=False) - - mask = File(desc='mask file to mask input data', - argstr="-mask %s", - exists=True) - - thresh = traits.Float(desc='threshold to exclude connections where corr <= thresh', - argstr='-thresh %f') - - polort = traits.Int(desc='', argstr='-polort %d') - - autoclip = traits.Bool(desc='Clip off low-intensity regions in the dataset', - argstr='-autoclip') - - automask = traits.Bool(desc='Mask the dataset to target brain-only voxels', - argstr='-automask') - - -class DegreeCentralityInputSpec(CentralityInputSpec): - """ - inherits the out_file parameter from AFNICommandOutputSpec base class - """ - - in_file = File(desc='input file to 3dDegreeCentrality', - argstr='%s', - position=-1, - mandatory=True, - exists=True, - copyfile=False) - - mask = File(desc='mask file to mask input data', - argstr="-mask %s", - exists=True) - - thresh = traits.Float(desc='threshold to exclude connections where corr <= thresh', - argstr='-thresh %f') - - sparsity = traits.Float(desc='only take the top percent of connections', - argstr='-sparsity %f') - - out_1d = traits.Str(desc='output filepath to text dump of correlation matrix', - argstr='-out1D') - - polort = traits.Int(desc='', argstr='-polort %d') - - autoclip = traits.Bool(desc='Clip off low-intensity regions in the dataset', - argstr='-autoclip') - - automask = traits.Bool(desc='Mask the dataset to target brain-only voxels', - argstr='-automask') - - -class DegreeCentralityOutputSpec(AFNICommandOutputSpec): - """ - inherits the out_file parameter from AFNICommandOutputSpec base class - """ - - one_d_file = File(desc='The text output of the similarity matrix computed'\ - 'after thresholding with one-dimensional and '\ - 'ijk voxel indices, correlations, image extents, '\ - 'and affine matrix') - - -class DegreeCentrality(AFNICommand): - """Performs degree centrality on a dataset using a given maskfile - via 3dDegreeCentrality - - For complete details, see the `3dDegreeCentrality Documentation. - - - Examples - ======== - - >>> from nipype.interfaces import afni as afni - >>> degree = afni.DegreeCentrality() - >>> degree.inputs.in_file = 'func_preproc.nii' - >>> degree.inputs.mask = 'mask.nii' - >>> degree.inputs.sparsity = 1 # keep the top one percent of connections - >>> degree.cmdline - '3dDegreeCentrality -sparsity 1 -mask mask.nii func_preproc.nii' - >>> res = degree.run() # doctest: +SKIP - """ - - _cmd = '3dDegreeCentrality' - input_spec = DegreeCentralityInputSpec - output_spec = DegreeCentralityOutputSpec - - -class AutomaskInputSpec(AFNICommandInputSpec): - in_file = File(desc='input file to 3dAutomask', - argstr='%s', - position=-1, - mandatory=True, - exists=True, - copyfile=False) - - out_file = File(name_template="%s_mask", desc='output image file name', - argstr='-prefix %s', name_source="in_file") - - brain_file = File(name_template="%s_masked", - desc="output file from 3dAutomask", - argstr='-apply_prefix %s', - name_source="in_file") - - clfrac = traits.Float(desc='sets the clip level fraction' + - ' (must be 0.1-0.9). ' + - 'A small value will tend to make the mask larger [default = 0.5].', - argstr="-clfrac %s") - - dilate = traits.Int(desc='dilate the mask outwards', - argstr="-dilate %s") - - erode = traits.Int(desc='erode the mask inwards', - argstr="-erode %s") - class AutomaskOutputSpec(TraitedSpec): out_file = File(desc='mask file', From cbd08e0246ad2783a4bc05a80e41eadb00807d4f Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 18 Feb 2016 12:32:47 -0500 Subject: [PATCH 24/78] Added back in the automaskinputspec --- nipype/interfaces/afni/preprocess.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 1d6fc8d1c5..9ccef02689 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -507,6 +507,33 @@ class Despike(AFNICommand): output_spec = AFNICommandOutputSpec +class AutomaskInputSpec(AFNICommandInputSpec): + in_file = File(desc='input file to 3dAutomask', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + + out_file = File(name_template="%s_mask", desc='output image file name', + argstr='-prefix %s', name_source="in_file") + + brain_file = File(name_template="%s_masked", + desc="output file from 3dAutomask", + argstr='-apply_prefix %s', + name_source="in_file") + + clfrac = traits.Float(desc='sets the clip level fraction' + + ' (must be 0.1-0.9). ' + + 'A small value will tend to make the mask larger [default = 0.5].', + argstr="-clfrac %s") + + dilate = traits.Int(desc='dilate the mask outwards', + argstr="-dilate %s") + + erode = traits.Int(desc='erode the mask inwards', + argstr="-erode %s") + class AutomaskOutputSpec(TraitedSpec): out_file = File(desc='mask file', From 29bcd80065afe1da2bec02a83b80859f7317f876 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 22 Feb 2016 13:14:55 -0500 Subject: [PATCH 25/78] Added unit tests for runtime_profiler --- .../interfaces/tests/test_runtime_profiler.py | 185 ++++++++++++++++++ nipype/interfaces/tests/use_resources | 62 ++++++ 2 files changed, 247 insertions(+) create mode 100644 nipype/interfaces/tests/test_runtime_profiler.py create mode 100755 nipype/interfaces/tests/use_resources diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py new file mode 100644 index 0000000000..c7b56c3404 --- /dev/null +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -0,0 +1,185 @@ +# test_runtime_profiler.py +# +# Author: Daniel Clark, 2016 + +''' +Module to unit test the runtime_profiler in nipype +''' + +# Import packages +import unittest +from nipype.interfaces.base import traits, CommandLine, CommandLineInputSpec + + +# UseResources inputspec +class UseResourcesInputSpec(CommandLineInputSpec): + ''' + ''' + + # Init attributes + num_gb = traits.Float(desc='Number of GB of RAM to use', + argstr = "-g %f") + num_procs = traits.Int(desc='Number of processors to use', + argstr = "-p %d") + + +# UseResources interface +class UseResources(CommandLine): + ''' + ''' + + # Import packages + import os + + # Init attributes + input_spec = UseResourcesInputSpec + + # Get path of executable + exec_dir = os.path.dirname(os.path.realpath(__file__)) + exec_path = os.path.join(exec_dir, 'use_resources') + + # Init cmd + _cmd = exec_path + + +# Test case for the run function +class RuntimeProfilerTestCase(unittest.TestCase): + ''' + This class is a test case for the ResourceMultiProc plugin runtime + profiler + + Inherits + -------- + unittest.TestCase class + + Attributes (class): + ------------------ + see unittest.TestCase documentation + + Attributes (instance): + ---------------------- + ''' + + # setUp method for the necessary arguments to run cpac_pipeline.run + def setUp(self): + ''' + Method to instantiate TestCase + + Parameters + ---------- + self : RuntimeProfileTestCase + a unittest.TestCase-inherited class + ''' + + self.num_gb = 2 + self.num_procs = 2 + + # Test node + def _run_workflow(self): + ''' + Function to run the use_resources script in a nipype workflow + and return the runtime stats recorded by the profiler + + Parameters + ---------- + self : RuntimeProfileTestCase + a unittest.TestCase-inherited class + + Returns + ------- + finish_str : string + a json-compatible dictionary string containing the runtime + statistics of the nipype node that used system resources + ''' + + # Import packages + import logging + import os + import tempfile + + import nipype.pipeline.engine as pe + import nipype.interfaces.utility as util + from nipype.pipeline.plugins.callback_log import log_nodes_cb + + # Init variables + num_gb = self.num_gb + num_procs = self.num_procs + base_dir = tempfile.mkdtemp() + log_file = os.path.join(base_dir, 'callback.log') + + # Init logger + logger = logging.getLogger('callback') + logger.setLevel(logging.DEBUG) + handler = logging.FileHandler(log_file) + logger.addHandler(handler) + + # Declare workflow + wf = pe.Workflow(name='test_runtime_prof') + wf.base_dir = base_dir + + # Input node + input_node = pe.Node(util.IdentityInterface(fields=['num_gb', + 'num_procs']), + name='input_node') + input_node.inputs.num_gb = num_gb + input_node.inputs.num_procs = num_procs + + # Resources used node + resource_node = pe.Node(UseResources(), name='resource_node') + resource_node.interface.estimated_memory = num_gb + resource_node.interface.num_threads = num_procs + + # Connect workflow + wf.connect(input_node, 'num_gb', resource_node, 'num_gb') + wf.connect(input_node, 'num_procs', resource_node, 'num_procs') + + # Run workflow + plugin_args = {'n_procs' : num_procs, + 'memory' : num_gb, + 'runtime_profile' : True, + 'status_callback' : log_nodes_cb} + wf.run(plugin='ResourceMultiProc', plugin_args=plugin_args) + + # Get runtime stats from log file + finish_str = open(log_file, 'r').readlines()[1].rstrip('\n') + + # Delete wf base dir + shutil.rmtree(base_dir) + + # Return runtime stats + return finish_str + + # Test resources were used as expected + def test_wf_logfile(self): + ''' + Test to see that the input resources to consume match what was + recorded during runtime + ''' + + # Import packages + import json + + # Init variables + places = 1 + + # Run workflow and get stats + finish_str = self._run_workflow() + # Get runtime stats as dictionary + node_stats = json.loads(finish_str) + + # Read out runtime stats + runtime_gb = float(node_stats['runtime_memory']) + runtime_procs = int(node_stats['runtime_threads']) + + # Assert runtime stats are what was input + mem_err = 'Input memory: %.5f is not within %d places of runtime '\ + 'memory: %.5f' % (self.num_gb, places, runtime_gb) + self.assertAlmostEqual(self.num_gb, runtime_gb, places=places, msg=mem_err) + procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ + % (self.num_procs, runtime_procs) + self.assertEqual(self.num_procs, runtime_procs, msg=procs_err) + + +# Command-line run-able unittest module +if __name__ == '__main__': + unittest.main() diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources new file mode 100755 index 0000000000..1e86a0e671 --- /dev/null +++ b/nipype/interfaces/tests/use_resources @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# use_resources + +''' +Python script to use a certain amount of RAM on disk and number of +processors + +Usage: + use_resources -g -p +''' + +# Function to occupy GB of memory +def use_gb_ram(num_gb): + ''' + Function to consume GB of memory + ''' + + # Eat 1 GB of memory for 1 second + gb_str = ' ' * int(num_gb*1024.0**3) + + ctr = 0 + while ctr < 100e6: + ctr+= 1 + + # Clear memory + del ctr + del gb_str + + +# Make main executable +if __name__ == '__main__': + + # Import packages + import argparse + from multiprocessing import Process + + # Init argparser + parser = argparse.ArgumentParser(description=__doc__) + + # Add arguments + parser.add_argument('-g', '--num_gb', nargs=1, required=True, + help='Number of GB RAM to use, can be float or int') + parser.add_argument('-p', '--num_procs', nargs=1, required=True, + help='Number of processors to run in parallel') + + # Parse args + args = parser.parse_args() + + # Init variables + num_gb = float(args.num_gb[0]) + num_procs = int(args.num_procs[0]) + + # Build proc list + proc_list = [] + for idx in range(num_procs): + proc_list.append(Process(target=use_gb_ram, args=(num_gb/num_procs,))) + + # Run multi-threaded + print 'Using %.3f GB of memory over %d processors...' % (num_gb, num_procs) + for proc in proc_list: + proc.start() From a170644f88d6e30b2537282d7361799e8f471300 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 22 Feb 2016 13:27:30 -0500 Subject: [PATCH 26/78] Added import and reduced resources used --- nipype/interfaces/tests/test_runtime_profiler.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index c7b56c3404..f8c71a4a22 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -71,7 +71,7 @@ def setUp(self): a unittest.TestCase-inherited class ''' - self.num_gb = 2 + self.num_gb = 1 self.num_procs = 2 # Test node @@ -95,6 +95,7 @@ def _run_workflow(self): # Import packages import logging import os + import shutil import tempfile import nipype.pipeline.engine as pe @@ -152,8 +153,7 @@ def _run_workflow(self): # Test resources were used as expected def test_wf_logfile(self): ''' - Test to see that the input resources to consume match what was - recorded during runtime + Test runtime profiler correctly records workflow RAM/CPUs consumption ''' # Import packages @@ -171,12 +171,15 @@ def test_wf_logfile(self): runtime_gb = float(node_stats['runtime_memory']) runtime_procs = int(node_stats['runtime_threads']) - # Assert runtime stats are what was input + # Error message formatting mem_err = 'Input memory: %.5f is not within %d places of runtime '\ 'memory: %.5f' % (self.num_gb, places, runtime_gb) - self.assertAlmostEqual(self.num_gb, runtime_gb, places=places, msg=mem_err) procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ % (self.num_procs, runtime_procs) + + # Assert runtime stats are what was input + self.assertAlmostEqual(self.num_gb, runtime_gb, places=places, + msg=mem_err) self.assertEqual(self.num_procs, runtime_procs, msg=procs_err) From 250b6d3c73cad3df83848a2136eaba0a5b87e431 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 23 Feb 2016 13:24:16 -0500 Subject: [PATCH 27/78] Added runtime_profile to run by default unless the necessary packages arent available --- nipype/interfaces/base.py | 24 ++++++---- .../interfaces/tests/test_runtime_profiler.py | 1 - nipype/pipeline/plugins/multiproc.py | 48 ++++++++----------- 3 files changed, 34 insertions(+), 39 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 14c57e406f..c158efbddb 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1032,6 +1032,7 @@ def run(self, **inputs): self._check_mandatory_inputs() self._check_version_requirements(self.inputs) interface = self.__class__ + # initialize provenance tracking env = deepcopy(dict(os.environ)) runtime = Bunch(cwd=os.getcwd(), @@ -1211,7 +1212,6 @@ def _get_num_threads(proc): # Import packages import psutil - import logging as lg # Init variables num_threads = proc.num_threads() @@ -1255,13 +1255,19 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): The returned runtime contains a merged stdout+stderr log with timestamps """ - # Import packages + # Init logger + logger = logging.getLogger('workflow') + + # Default to profiling the runtime try: import memory_profiler import psutil - mem_prof = True - except: - mem_prof = False + runtime_profile = True + except ImportError as exc: + logger.info('Unable to import packages needed for runtime '\ + 'profiling. Turning off runtime profiler.\n'\ + 'Error: %s' % exc) + runtime_profile = False # Init variables PIPE = subprocess.PIPE @@ -1317,7 +1323,7 @@ def _process(drain=0): for stream in res[0]: stream.read(drain) while proc.returncode is None: - if mem_prof: + if runtime_profile: mem_mb, num_threads = \ _get_max_resources_used(proc, mem_mb, num_threads) proc.poll() @@ -1335,7 +1341,7 @@ def _process(drain=0): result['merged'] = [r[1] for r in temp] if output == 'allatonce': - if mem_prof: + if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ _get_max_resources_used(proc, mem_mb, num_threads, poll=True) @@ -1355,7 +1361,7 @@ def _process(drain=0): result['stderr'] = str(stderr).split('\n') result['merged'] = '' if output == 'file': - if mem_prof: + if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ _get_max_resources_used(proc, mem_mb, num_threads, poll=True) @@ -1366,7 +1372,7 @@ def _process(drain=0): result['stderr'] = [line.strip() for line in open(errfile).readlines()] result['merged'] = '' if output == 'none': - if mem_prof: + if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ _get_max_resources_used(proc, mem_mb, num_threads, poll=True) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index f8c71a4a22..7f9e9e8699 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -137,7 +137,6 @@ def _run_workflow(self): # Run workflow plugin_args = {'n_procs' : num_procs, 'memory' : num_gb, - 'runtime_profile' : True, 'status_callback' : log_nodes_cb} wf.run(plugin='ResourceMultiProc', plugin_args=plugin_args) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index b34f9944c5..2876994627 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -23,7 +23,7 @@ logger = logging.getLogger('workflow') # Run node -def run_node(node, updatehash, runtime_profile=False): +def run_node(node, updatehash): """docstring """ @@ -33,29 +33,22 @@ def run_node(node, updatehash, runtime_profile=False): # Init variables result = dict(result=None, traceback=None) - # If we're profiling the run - if runtime_profile: - try: - start = datetime.datetime.now() - retval = node.run(updatehash=updatehash) - run_secs = (datetime.datetime.now() - start).total_seconds() - result['result'] = retval - result['runtime_seconds'] = run_secs - if hasattr(retval.runtime, 'get'): - result['runtime_memory'] = retval.runtime.get('runtime_memory') - result['runtime_threads'] = retval.runtime.get('runtime_threads') - except: - etype, eval, etr = sys.exc_info() - result['traceback'] = format_exception(etype,eval,etr) - result['result'] = node.result - # Otherwise, execute node.run as normal - else: - try: - result['result'] = node.run(updatehash=updatehash) - except: - etype, eval, etr = sys.exc_info() - result['traceback'] = format_exception(etype,eval,etr) - result['result'] = node.result + # + try: + start = datetime.datetime.now() + retval = node.run(updatehash=updatehash) + run_secs = (datetime.datetime.now() - start).total_seconds() + result['result'] = retval + result['runtime_seconds'] = run_secs + if hasattr(retval.runtime, 'get'): + result['runtime_memory'] = retval.runtime.get('runtime_memory') + result['runtime_threads'] = retval.runtime.get('runtime_threads') + except: + etype, eval, etr = sys.exc_info() + result['traceback'] = format_exception(etype,eval,etr) + result['result'] = node.result + + # Return the result dictionary return result @@ -160,13 +153,10 @@ def _submit_job(self, node, updatehash=False): node.inputs.terminal_output = 'allatonce' except: pass - try: - runtime_profile = self.plugin_args['runtime_profile'] - except: - runtime_profile = False + self._taskresult[self._taskid] = \ self.pool.apply_async(run_node, - (node, updatehash, runtime_profile), + (node, updatehash), callback=release_lock) return self._taskid From f4b0b73048e035f541e197c12f431ec01e0225be Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 23 Feb 2016 18:07:55 -0500 Subject: [PATCH 28/78] Cleaned up some of the code to PEP8 and checked for errors --- nipype/interfaces/afni/__init__.py | 4 +- nipype/interfaces/afni/preprocess.py | 1 - nipype/interfaces/base.py | 45 +++++++--- .../interfaces/tests/test_runtime_profiler.py | 2 + nipype/interfaces/tests/use_resources | 1 + nipype/pipeline/engine/nodes.py | 1 - nipype/pipeline/engine/tests/test_engine.py | 2 +- nipype/pipeline/plugins/base.py | 1 + nipype/pipeline/plugins/callback_log.py | 12 ++- nipype/pipeline/plugins/multiproc.py | 50 ++++++++--- .../pipeline/plugins/tests/test_multiproc.py | 47 +++++----- .../plugins/tests/test_multiproc_nondaemon.py | 17 ++-- nipype/utils/draw_gantt_chart.py | 90 ++++++++++++------- 13 files changed, 179 insertions(+), 94 deletions(-) diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index 159acfeaf1..4437a3ccd2 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -8,8 +8,8 @@ from .base import Info from .preprocess import (To3D, Refit, Resample, TStat, Automask, Volreg, Merge, - ZCutUp, Calc, TShift, Warp, Detrend, Despike, - Copy, Fourier, Allineate, Maskave, SkullStrip, TCat, Fim, + ZCutUp, Calc, TShift, Warp, Detrend, Despike, Copy, + Fourier, Allineate, Maskave, SkullStrip, TCat, Fim, BlurInMask, Autobox, TCorrMap, Bandpass, Retroicor, TCorrelate, TCorr1D, BrickStat, ROIStats, AutoTcorrelate, AFNItoNIFTI, Eval, Means) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 9ccef02689..85f2a4eaf9 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -180,7 +180,6 @@ class RefitInputSpec(CommandLineInputSpec): ' template type, e.g. TLRC, MNI, ORIG') - class Refit(CommandLine): """Changes some of the information inside a 3D dataset's header diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index c158efbddb..7ac29a5fa4 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1032,7 +1032,6 @@ def run(self, **inputs): self._check_mandatory_inputs() self._check_version_requirements(self.inputs) interface = self.__class__ - # initialize provenance tracking env = deepcopy(dict(os.environ)) runtime = Bunch(cwd=os.getcwd(), @@ -1207,8 +1206,18 @@ def _read(self, drain): # Get number of threads for process def _get_num_threads(proc): - ''' - ''' + """Function to get the number of threads a process is using + + Parameters + ---------- + proc : psutil.Process instance + the process to evaluate thead usage of + + Returns + ------- + num_threads : int + the number of threads that the process is using + """ # Import packages import psutil @@ -1223,14 +1232,32 @@ def _get_num_threads(proc): except psutil.NoSuchProcess: pass + # Return the number of threads found return num_threads # Get max resources used for process def _get_max_resources_used(proc, mem_mb, num_threads, poll=False): - ''' - docstring - ''' + """Function to get the RAM and threads usage of a process + + Paramters + --------- + proc : subprocess.Popen instance + the process to profile + mem_mb : float + the high memory watermark so far during process execution (in MB) + num_threads: int + the high thread watermark so far during process execution + poll : boolean + whether to poll the process or not + + Returns + ------- + mem_mb : float + the new high memory watermark of process (MB) + num_threads : float + the new high thread watermark of process + """ # Import packages from memory_profiler import _get_memory @@ -1264,9 +1291,8 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): import psutil runtime_profile = True except ImportError as exc: - logger.info('Unable to import packages needed for runtime '\ - 'profiling. Turning off runtime profiler.\n'\ - 'Error: %s' % exc) + logger.info('Unable to import packages needed for runtime profiling. '\ + 'Turning off runtime profiler.\nError: %s' % exc) runtime_profile = False # Init variables @@ -1305,7 +1331,6 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Init variables for memory profiling mem_mb = -1 num_threads = -1 - interval = 1 if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 7f9e9e8699..bddf78433e 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -14,6 +14,7 @@ # UseResources inputspec class UseResourcesInputSpec(CommandLineInputSpec): ''' + use_resources cmd interface inputspec ''' # Init attributes @@ -26,6 +27,7 @@ class UseResourcesInputSpec(CommandLineInputSpec): # UseResources interface class UseResources(CommandLine): ''' + use_resources cmd interface ''' # Import packages diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index 1e86a0e671..a12eb6ed24 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -19,6 +19,7 @@ def use_gb_ram(num_gb): # Eat 1 GB of memory for 1 second gb_str = ' ' * int(num_gb*1024.0**3) + # Spin CPU ctr = 0 while ctr < 100e6: ctr+= 1 diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 63b9ae13f8..9f9165e3b2 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -52,7 +52,6 @@ from ... import config, logging logger = logging.getLogger('workflow') - from ...interfaces.base import (traits, InputMultiPath, CommandLine, Undefined, TraitedSpec, DynamicTraitedSpec, Bunch, InterfaceResult, md5, Interface, diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 09f3ec92c2..ce618abf27 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -715,7 +715,7 @@ def func1(in1): w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'true', 'crashdump_dir': wd, - 'poll_sleep_duration' : 2} + 'poll_sleep_duration': 2} # test output of num_subnodes method when serial is default (False) yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1) diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 092c1883f1..f1c2176bf7 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -20,6 +20,7 @@ import numpy as np import scipy.sparse as ssp + from ...utils.filemanip import savepkl, loadpkl from ...utils.misc import str2bool from ..engine.utils import (nx, dfs_preorder, topological_sort) diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index 548b98f342..83228c51d7 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -1,9 +1,17 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Callback logger for recording workflow and node run stats +""" + +# Import packages import datetime import logging +# Log node stats function def log_nodes_cb(node, status, result=None): - ''' - ''' + """Function to record node run statistics to a log file as json + dictionaries + """ # Init variables logger = logging.getLogger('callback') diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 2876994627..33c43ba28d 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -10,6 +10,7 @@ from multiprocessing import Process, Pool, cpu_count, pool from traceback import format_exception import sys + import numpy as np from copy import deepcopy from ..engine import MapNode @@ -24,7 +25,20 @@ # Run node def run_node(node, updatehash): - """docstring + """Function to execute node.run(), catch and log any errors and + return the result dictionary + + Parameters + ---------- + node : nipype Node instance + the node to run + updatehash : boolean + flag for updating hash + + Returns + ------- + result : dictionary + dictionary containing the node runtime results and stats """ # Import packages @@ -45,7 +59,7 @@ def run_node(node, updatehash): result['runtime_threads'] = retval.runtime.get('runtime_threads') except: etype, eval, etr = sys.exc_info() - result['traceback'] = format_exception(etype,eval,etr) + result['traceback'] = format_exception(etype, eval, etr) result['result'] = node.result # Return the result dictionary @@ -125,7 +139,6 @@ def _wait(self): semaphore_singleton.semaphore.acquire() semaphore_singleton.semaphore.release() - def _get_result(self, taskid): if taskid not in self._taskresult: raise RuntimeError('Multiproc task %d not found' % taskid) @@ -133,7 +146,6 @@ def _get_result(self, taskid): return None return self._taskresult[taskid].get() - def _report_crash(self, node, result=None): if result and result['traceback']: node._result = result['result'] @@ -167,7 +179,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): executing_now = [] # Check to see if a job is available - jobids = np.flatnonzero((self.proc_pending == True) & (self.depidx.sum(axis=0) == 0).__array__()) + jobids = np.flatnonzero((self.proc_pending == True) & \ + (self.depidx.sum(axis=0) == 0).__array__()) #check available system resources by summing all threads and memory used busy_memory = 0 @@ -181,22 +194,29 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): #check all jobs without dependency not run - jobids = np.flatnonzero((self.proc_done == False) & (self.depidx.sum(axis=0) == 0).__array__()) + jobids = np.flatnonzero((self.proc_done == False) & \ + (self.depidx.sum(axis=0) == 0).__array__()) #sort jobs ready to run first by memory and then by number of threads #The most resource consuming jobs run first - jobids = sorted(jobids, key=lambda item: (self.procs[item]._interface.estimated_memory, self.procs[item]._interface.num_threads)) + jobids = sorted(jobids, + key=lambda item: (self.procs[item]._interface.estimated_memory, + self.procs[item]._interface.num_threads)) - logger.debug('Free memory: %d, Free processors: %d', free_memory, free_processors) + logger.debug('Free memory: %d, Free processors: %d', + free_memory, free_processors) #while have enough memory and processors for first job #submit first job on the list for jobid in jobids: - logger.debug('Next Job: %d, memory: %d, threads: %d' %(jobid, self.procs[jobid]._interface.estimated_memory, self.procs[jobid]._interface.num_threads)) + logger.debug('Next Job: %d, memory: %d, threads: %d' \ + % (jobid, self.procs[jobid]._interface.estimated_memory, + self.procs[jobid]._interface.num_threads)) - if self.procs[jobid]._interface.estimated_memory <= free_memory and self.procs[jobid]._interface.num_threads <= free_processors: + if self.procs[jobid]._interface.estimated_memory <= free_memory and \ + self.procs[jobid]._interface.num_threads <= free_processors: logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid)) executing_now.append(self.procs[jobid]) @@ -228,7 +248,9 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): hash_exists, _, _, _ = self.procs[ jobid].hash_exists() logger.debug('Hash exists %s' % str(hash_exists)) - if (hash_exists and (self.procs[jobid].overwrite == False or (self.procs[jobid].overwrite == None and not self.procs[jobid]._interface.always_run))): + if (hash_exists and (self.procs[jobid].overwrite == False or \ + (self.procs[jobid].overwrite == None and \ + not self.procs[jobid]._interface.always_run))): self._task_finished_cb(jobid) self._remove_node_dirs() continue @@ -239,7 +261,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): logger.debug('Finished checking hash') if self.procs[jobid].run_without_submitting: - logger.debug('Running node %s on master thread' %self.procs[jobid]) + logger.debug('Running node %s on master thread' \ + % self.procs[jobid]) try: self.procs[jobid].run() except Exception: @@ -249,7 +272,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): else: logger.debug('submitting %s' % str(jobid)) - tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) + tid = self._submit_job(deepcopy(self.procs[jobid]), + updatehash=updatehash) if tid is None: self.proc_done[jobid] = False self.proc_pending[jobid] = False diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index cd41bbb695..887f4c50fb 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -1,12 +1,16 @@ +import logging import os -import nipype.interfaces.base as nib from tempfile import mkdtemp from shutil import rmtree +from multiprocessing import cpu_count +import psutil -from nipype.testing import assert_equal, assert_less_equal +import nipype.interfaces.base as nib +from nipype.utils import draw_gantt_chart +from nipype.testing import assert_equal import nipype.pipeline.engine as pe +from nipype.pipeline.plugins.callback_log import log_nodes_cb - class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') @@ -52,13 +56,12 @@ def test_run_multiproc(): os.chdir(cur_dir) rmtree(temp_dir) -################################ - class InputSpecSingleNode(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') input2 = nib.traits.Int(desc='a random int') + class OutputSpecSingleNode(nib.TraitedSpec): output1 = nib.traits.Int(desc='a random int') @@ -78,11 +81,12 @@ def _list_outputs(self): def find_metrics(nodes, last_node): - import json - from dateutil.parser import parse - from datetime import datetime - import datetime as d + """ + """ + # Import packages + from dateutil.parser import parse + import datetime start = parse(nodes[0]['start']) total_duration = int((parse(last_node['finish']) - start).total_seconds()) @@ -113,20 +117,11 @@ def find_metrics(nodes, last_node): if node_start > x: break - now += d.timedelta(seconds=1) + now += datetime.timedelta(seconds=1) return total_memory, total_threads -import os -from nipype.pipeline.plugins.callback_log import log_nodes_cb -import logging -import logging.handlers -import psutil -from multiprocessing import cpu_count - -from nipype.utils import draw_gantt_chart - def test_do_not_use_more_memory_then_specified(): LOG_FILENAME = 'callback.log' my_logger = logging.getLogger('callback') @@ -154,8 +149,9 @@ def test_do_not_use_more_memory_then_specified(): pipe.connect(n3, 'output1', n4, 'input2') n1.inputs.input1 = 10 - pipe.run(plugin='ResourceMultiProc', plugin_args={'memory': max_memory, - 'status_callback': log_nodes_cb}) + pipe.run(plugin='ResourceMultiProc', + plugin_args={'memory': max_memory, + 'status_callback': log_nodes_cb}) nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) @@ -178,13 +174,12 @@ def test_do_not_use_more_memory_then_specified(): result = False break - yield assert_equal, result, True, "using more threads than system has (threads is not specified by user)" + yield assert_equal, result, True,\ + "using more threads than system has (threads is not specified by user)" os.remove(LOG_FILENAME) - - def test_do_not_use_more_threads_then_specified(): LOG_FILENAME = 'callback.log' my_logger = logging.getLogger('callback') @@ -233,7 +228,7 @@ def test_do_not_use_more_threads_then_specified(): if m > max_memory: result = False break - yield assert_equal, result, True, "using more memory than system has (memory is not specified by user)" + yield assert_equal, result, True,\ + "using more memory than system has (memory is not specified by user)" os.remove(LOG_FILENAME) - diff --git a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py index 429eff0f26..59f32e5560 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py @@ -1,3 +1,9 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Testing module for functions and classes from multiproc.py +""" + +# Import packages from builtins import range import os from tempfile import mkdtemp @@ -15,9 +21,9 @@ def mytestFunction(insum=0): # need to import here since this is executed as an external process import multiprocessing + import os import tempfile import time - import os numberOfThreads = 2 @@ -74,17 +80,18 @@ def dummyFunction(filename): # read in all temp files and sum them up total = insum - for file in f: - with open(file) as fd: + for ff in f: + with open(ff) as fd: total += int(fd.read()) - os.remove(file) + os.remove(ff) return total def run_multiproc_nondaemon_with_flag(nondaemon_flag): ''' - Start a pipe with two nodes using the resource multiproc plugin and passing the nondaemon_flag. + Start a pipe with two nodes using the resource multiproc plugin and + passing the nondaemon_flag. ''' cur_dir = os.getcwd() diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index b435d5d925..93b012daf3 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -1,9 +1,16 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Module to draw an html gantt chart from logfile produced by +callback_log.log_nodes_cb() +""" + +# Import packages import json from dateutil import parser import datetime import random - + def log_to_json(logfile): result = [] with open(logfile, 'r') as content: @@ -31,9 +38,11 @@ def log_to_json(logfile): #fint the end node for that start for j in range(i+1, len(lines)): if lines[j].has_key('finish'): - if lines[j]['id'] == line['id'] and lines[j]['name'] == line['name']: + if lines[j]['id'] == line['id'] and \ + lines[j]['name'] == line['name']: line['finish'] = lines[j]['finish'] - line['duration'] = (parser.parse(line['finish']) - parser.parse(line['start'])).total_seconds() + line['duration'] = (parser.parse(line['finish']) - \ + parser.parse(line['start'])).total_seconds() result.append(line) break @@ -58,6 +67,7 @@ def draw_lines(start, total_duration, minute_scale, scale): next_time += datetime.timedelta(minutes=minute_scale) return result + def draw_nodes(start, nodes, cores, scale, colors): result = '' end_times = [datetime.datetime(start.year, start.month, start.day, start.hour, start.minute, start.second) for x in range(cores)] @@ -75,12 +85,22 @@ def draw_nodes(start, nodes, cores, scale, colors): for j in range(len(end_times)): if end_times[j] < node_start: left += j * 30 - end_times[j] = datetime.datetime(node_finish.year, node_finish.month, node_finish.day, node_finish.hour, node_finish.minute, node_finish.second) + end_times[j] = datetime.datetime(node_finish.year, + node_finish.month, + node_finish.day, + node_finish.hour, + node_finish.minute, + node_finish.second) #end_times[j]+= datetime.timedelta(microseconds=node_finish.microsecond) break color = random.choice(colors) - new_node = "
"; + new_node = "
"; result += new_node return result @@ -115,14 +135,15 @@ def draw_thread_bar(start, total_duration, nodes, space_between_minutes, minute_ for i in range(len(thread)): width = thread[i] * 10 t = (i*scale*minute_scale) + 220 - bar = "
" + bar = "
" result += bar return result - -def draw_memory_bar(start, total_duration, nodes, space_between_minutes, minute_scale): +def draw_memory_bar(start, total_duration, nodes, + space_between_minutes, minute_scale): result = "

Memory

" total = total_duration/60 @@ -152,36 +173,39 @@ def draw_memory_bar(start, total_duration, nodes, space_between_minutes, minute_ for i in range(len(memory)): width = memory[i] * 10 t = (i*scale*minute_scale) + 220 - bar = "
" + bar = "
" result += bar return result -''' -Generates a gantt chart in html showing the workflow execution based on a callback log file. -This script was intended to be used with the ResourceMultiprocPlugin. -The following code shows how to set up the workflow in order to generate the log file: - -# import logging -# import logging.handlers -# from nipype.pipeline.plugins.callback_log import log_nodes_cb - -# log_filename = 'callback.log' -# logger = logging.getLogger('callback') -# logger.setLevel(logging.DEBUG) -# handler = logging.FileHandler(log_filename) -# logger.addHandler(handler) - -# #create workflow -# workflow = ... - -# workflow.run(plugin='ResourceMultiProc', -# plugin_args={'num_threads':8, 'memory':12, 'status_callback': log_nodes_cb}) - -# generate_gantt_chart('callback.log', 8) -''' -def generate_gantt_chart(logfile, cores, minute_scale=10, space_between_minutes=50, colors=["#7070FF", "#4E4EB2", "#2D2D66", "#9B9BFF"]): +def generate_gantt_chart(logfile, cores, minute_scale=10, + space_between_minutes=50, + colors=["#7070FF", "#4E4EB2", "#2D2D66", "#9B9BFF"]): + ''' + Generates a gantt chart in html showing the workflow execution based on a callback log file. + This script was intended to be used with the ResourceMultiprocPlugin. + The following code shows how to set up the workflow in order to generate the log file: + + # import logging + # import logging.handlers + # from nipype.pipeline.plugins.callback_log import log_nodes_cb + + # log_filename = 'callback.log' + # logger = logging.getLogger('callback') + # logger.setLevel(logging.DEBUG) + # handler = logging.FileHandler(log_filename) + # logger.addHandler(handler) + + # #create workflow + # workflow = ... + + # workflow.run(plugin='ResourceMultiProc', + # plugin_args={'num_threads':8, 'memory':12, 'status_callback': log_nodes_cb}) + + # generate_gantt_chart('callback.log', 8) + ''' result, last_node = log_to_json(logfile) scale = space_between_minutes From 9d19e140d730ed74e7080fa0d78c4129c0bf57b8 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 24 Feb 2016 15:34:54 -0500 Subject: [PATCH 29/78] Changed memory parameters to be memory_gb to be more explicit, used runtime Bunch object only for runtime stats storage instead of using results dictionary, renamed ResourceMultiProc to MultiProc for backwards-compatiblity --- nipype/interfaces/ants/base.py | 2 +- nipype/interfaces/base.py | 4 +- .../interfaces/tests/test_runtime_profiler.py | 9 ++--- nipype/pipeline/engine/nodes.py | 19 ++++++---- nipype/pipeline/engine/tests/test_engine.py | 4 +- nipype/pipeline/engine/tests/test_utils.py | 2 +- nipype/pipeline/plugins/__init__.py | 2 +- nipype/pipeline/plugins/base.py | 9 ++--- nipype/pipeline/plugins/callback_log.py | 37 ++++++++---------- nipype/pipeline/plugins/multiproc.py | 38 +++++++------------ nipype/pipeline/plugins/tests/test_base.py | 2 +- .../pipeline/plugins/tests/test_callback.py | 4 +- .../pipeline/plugins/tests/test_multiproc.py | 18 ++++----- .../plugins/tests/test_multiproc_nondaemon.py | 4 +- nipype/utils/draw_gantt_chart.py | 8 ++-- 15 files changed, 73 insertions(+), 89 deletions(-) diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index c3ea4a674e..20fab05881 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -12,7 +12,7 @@ # -Using -1 gives primary responsibilty to ITKv4 to do the correct # thread limitings. # -Using 1 takes a very conservative approach to avoid overloading -# the computer (when running ResourceMultiProc) by forcing everything to +# the computer (when running MultiProc) by forcing everything to # single threaded. This can be a severe penalty for registration # performance. LOCAL_DEFAULT_NUMBER_OF_THREADS = 1 diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 7ac29a5fa4..403dba2bea 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -764,7 +764,7 @@ def __init__(self, **inputs): raise Exception('No input_spec in class: %s' % self.__class__.__name__) self.inputs = self.input_spec(**inputs) - self.estimated_memory = 1 + self.estimated_memory_gb = 1 self.num_threads = 1 @classmethod @@ -1406,7 +1406,7 @@ def _process(drain=0): result['stderr'] = [] result['merged'] = '' - setattr(runtime, 'runtime_memory', mem_mb/1024.0) + setattr(runtime, 'runtime_memory_gb', mem_mb/1024.0) setattr(runtime, 'runtime_threads', num_threads) runtime.stderr = '\n'.join(result['stderr']) runtime.stdout = '\n'.join(result['stdout']) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index bddf78433e..6912ed1cca 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -47,8 +47,7 @@ class UseResources(CommandLine): # Test case for the run function class RuntimeProfilerTestCase(unittest.TestCase): ''' - This class is a test case for the ResourceMultiProc plugin runtime - profiler + This class is a test case for the runtime profiler Inherits -------- @@ -129,7 +128,7 @@ def _run_workflow(self): # Resources used node resource_node = pe.Node(UseResources(), name='resource_node') - resource_node.interface.estimated_memory = num_gb + resource_node.interface.estimated_memory_gb = num_gb resource_node.interface.num_threads = num_procs # Connect workflow @@ -140,7 +139,7 @@ def _run_workflow(self): plugin_args = {'n_procs' : num_procs, 'memory' : num_gb, 'status_callback' : log_nodes_cb} - wf.run(plugin='ResourceMultiProc', plugin_args=plugin_args) + wf.run(plugin='MultiProc', plugin_args=plugin_args) # Get runtime stats from log file finish_str = open(log_file, 'r').readlines()[1].rstrip('\n') @@ -169,7 +168,7 @@ def test_wf_logfile(self): node_stats = json.loads(finish_str) # Read out runtime stats - runtime_gb = float(node_stats['runtime_memory']) + runtime_gb = float(node_stats['runtime_memory_gb']) runtime_procs = int(node_stats['runtime_threads']) # Error message formatting diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 9f9165e3b2..320feaed7e 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -737,15 +737,20 @@ def write_report(self, report_type=None, cwd=None): fp.close() return fp.writelines(write_rst_header('Runtime info', level=1)) + # Init rst dictionary of runtime stats + rst_dict = {'hostname' : self.result.runtime.hostname, + 'duration' : self.result.runtime.duration} + # Try and insert memory/threads usage if available + try: + rst_dict['runtime_memory_gb'] = self.result.runtime.runtime_memory_gb + rst_dict['runtime_threads'] = self.result.runtime.runtime_threads + except: + logger.info('Runtime memory and threads stats unavailable') if hasattr(self.result.runtime, 'cmdline'): - fp.writelines(write_rst_dict( - {'hostname': self.result.runtime.hostname, - 'duration': self.result.runtime.duration, - 'command': self.result.runtime.cmdline})) + rst_dict['command'] = self.result.runtime.cmdline + fp.writelines(write_rst_dict(rst_dict)) else: - fp.writelines(write_rst_dict( - {'hostname': self.result.runtime.hostname, - 'duration': self.result.runtime.duration})) + fp.writelines(write_rst_dict(rst_dict)) if hasattr(self.result.runtime, 'merged'): fp.writelines(write_rst_header('Terminal output', level=2)) fp.writelines(write_rst_list(self.result.runtime.merged)) diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index ce618abf27..5eaaa81fbf 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -723,7 +723,7 @@ def func1(in1): # test running the workflow on default conditions error_raised = False try: - w1.run(plugin='ResourceMultiProc') + w1.run(plugin='MultiProc') except Exception as e: from nipype.pipeline.engine.base import logger logger.info('Exception: %s' % str(e)) @@ -737,7 +737,7 @@ def func1(in1): # test running the workflow on serial conditions error_raised = False try: - w1.run(plugin='ResourceMultiProc') + w1.run(plugin='MultiProc') except Exception as e: from nipype.pipeline.engine.base import logger logger.info('Exception: %s' % str(e)) diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 9688e02395..8420f587c2 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -214,7 +214,7 @@ def test_function3(arg): out_dir = mkdtemp() - for plugin in ('Linear',): # , 'ResourceMultiProc'): + for plugin in ('Linear',): # , 'MultiProc'): n1 = pe.Node(niu.Function(input_names=['arg1'], output_names=['out_file1', 'out_file2', 'dir'], function=test_function), diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py index 643d5735f8..45a8c40e8f 100644 --- a/nipype/pipeline/plugins/__init__.py +++ b/nipype/pipeline/plugins/__init__.py @@ -9,7 +9,7 @@ from .sge import SGEPlugin from .condor import CondorPlugin from .dagman import CondorDAGManPlugin -from .multiproc import ResourceMultiProcPlugin +from .multiproc import MultiProcPlugin from .ipython import IPythonPlugin from .somaflow import SomaFlowPlugin from .pbsgraph import PBSGraphPlugin diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index f1c2176bf7..8f4638db30 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -246,7 +246,7 @@ def run(self, graph, config, updatehash=False): notrun.append(self._clean_queue(jobid, graph, result=result)) else: - self._task_finished_cb(jobid, result) + self._task_finished_cb(jobid) self._remove_node_dirs() self._clear_task(taskid) else: @@ -415,7 +415,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): else: break - def _task_finished_cb(self, jobid, result=None): + def _task_finished_cb(self, jobid): """ Extract outputs and assign to inputs of dependent tasks This is called when a job is completed. @@ -423,10 +423,7 @@ def _task_finished_cb(self, jobid, result=None): logger.info('[Job finished] jobname: %s jobid: %d' % (self.procs[jobid]._id, jobid)) if self._status_callback: - if result == None: - if self._taskresult.has_key(jobid): - result = self._taskresult[jobid].get() - self._status_callback(self.procs[jobid], 'end', result) + self._status_callback(self.procs[jobid], 'end') # Update job and worker queues self.proc_pending[jobid] = False # update the job dependency structure diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index 83228c51d7..d4445a28c9 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -8,7 +8,7 @@ import logging # Log node stats function -def log_nodes_cb(node, status, result=None): +def log_nodes_cb(node, status): """Function to record node run statistics to a log file as json dictionaries """ @@ -17,47 +17,40 @@ def log_nodes_cb(node, status, result=None): logger = logging.getLogger('callback') # Check runtime profile stats - if result is None: - runtime_memory = runtime_seconds = runtime_threads = 'N/A' - else: - try: - runtime_memory = result['runtime_memory'] - except KeyError: - runtime_memory = 'Unknown' + if node.result is not None: try: - runtime_seconds = result['runtime_seconds'] - except KeyError: - runtime_seconds = 'Unknown' - try: - runtime_threads = result['runtime_threads'] + runtime = node.result.runtime + runtime_memory_gb = runtime.runtime_memory_gb + runtime_threads = runtime.runtime_threads except: - runtime_threads = 'Unknown' + runtime_memory_gb = runtime_threads = 'Unkown' + else: + runtime_memory_gb = runtime_threads = 'N/A' # Check status and write to log # Start if status == 'start': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' +\ node._id + '"' + ',"start":' + '"' +str(datetime.datetime.now()) +\ - '"' + ',"estimated_memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ - + str(node._interface.num_threads) + '}' + '"' + ',"estimated_memory_gb":' + str(node._interface.estimated_memory_gb) + \ + ',"num_threads":' + str(node._interface.num_threads) + '}' logger.debug(message) # End elif status == 'end': message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ - '"' + ',"estimated_memory":' + '"'+ str(node._interface.estimated_memory) + '"'+ \ - ',"num_threads":' + '"'+ str(node._interface.num_threads) + '"'+ \ + '"' + ',"estimated_memory_gb":' + '"'+ str(node._interface.estimated_memory_gb) + \ + '"'+ ',"num_threads":' + '"'+ str(node._interface.num_threads) + '"'+ \ ',"runtime_threads":' + '"'+ str(runtime_threads) + '"'+ \ - ',"runtime_memory":' + '"'+ str(runtime_memory) + '"' + \ - ',"runtime_seconds":' + '"'+ str(runtime_seconds) + '"'+ '}' + ',"runtime_memory_gb":' + '"'+ str(runtime_memory_gb) + '"' + '}' logger.debug(message) # Other else: message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) +\ - '"' + ',"estimated_memory":' + str(node._interface.estimated_memory) + ',"num_threads":' \ - + str(node._interface.num_threads) + ',"error":"True"}' + '"' + ',"estimated_memory_gb":' + str(node._interface.estimated_memory_gb) + \ + ',"num_threads":' + str(node._interface.num_threads) + ',"error":"True"}' logger.debug(message) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 33c43ba28d..1af2b8bdba 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -41,22 +41,12 @@ def run_node(node, updatehash): dictionary containing the node runtime results and stats """ - # Import packages - import datetime - # Init variables result = dict(result=None, traceback=None) - # + # Try and execute the node via node.run() try: - start = datetime.datetime.now() - retval = node.run(updatehash=updatehash) - run_secs = (datetime.datetime.now() - start).total_seconds() - result['result'] = retval - result['runtime_seconds'] = run_secs - if hasattr(retval.runtime, 'get'): - result['runtime_memory'] = retval.runtime.get('runtime_memory') - result['runtime_threads'] = retval.runtime.get('runtime_threads') + result['result'] = node.run(updatehash=updatehash) except: etype, eval, etr = sys.exc_info() result['traceback'] = format_exception(etype, eval, etr) @@ -88,7 +78,7 @@ def release_lock(args): semaphore_singleton.semaphore.release() -class ResourceMultiProcPlugin(DistributedPluginBase): +class MultiProcPlugin(DistributedPluginBase): """Execute workflow with multiprocessing, not sending more jobs at once than the system can support. @@ -98,7 +88,7 @@ class ResourceMultiProcPlugin(DistributedPluginBase): the number of threads and memory of the system is used. System consuming nodes should be tagged: - memory_consuming_node.interface.estimated_memory = 8 #Gb + memory_consuming_node.interface.estimated_memory_gb = 8 thread_consuming_node.interface.num_threads = 16 The default number of threads and memory for a node is 1. @@ -107,12 +97,12 @@ class ResourceMultiProcPlugin(DistributedPluginBase): - non_daemon : boolean flag to execute as non-daemon processes - num_threads: maximum number of threads to be executed in parallel - - estimated_memory: maximum memory that can be used at once. + - estimated_memory_gb: maximum memory (in GB) that can be used at once. """ def __init__(self, plugin_args=None): - super(ResourceMultiProcPlugin, self).__init__(plugin_args=plugin_args) + super(MultiProcPlugin, self).__init__(plugin_args=plugin_args) self._taskresult = {} self._taskid = 0 non_daemon = True @@ -186,8 +176,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): busy_memory = 0 busy_processors = 0 for jobid in jobids: - busy_memory+= self.procs[jobid]._interface.estimated_memory - busy_processors+= self.procs[jobid]._interface.num_threads + busy_memory += self.procs[jobid]._interface.estimated_memory_gb + busy_processors += self.procs[jobid]._interface.num_threads free_memory = self.memory - busy_memory free_processors = self.processors - busy_processors @@ -201,21 +191,21 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): #sort jobs ready to run first by memory and then by number of threads #The most resource consuming jobs run first jobids = sorted(jobids, - key=lambda item: (self.procs[item]._interface.estimated_memory, + key=lambda item: (self.procs[item]._interface.estimated_memory_gb, self.procs[item]._interface.num_threads)) - logger.debug('Free memory: %d, Free processors: %d', + logger.debug('Free memory (GB): %d, Free processors: %d', free_memory, free_processors) #while have enough memory and processors for first job #submit first job on the list for jobid in jobids: - logger.debug('Next Job: %d, memory: %d, threads: %d' \ - % (jobid, self.procs[jobid]._interface.estimated_memory, + logger.debug('Next Job: %d, memory (GB): %d, threads: %d' \ + % (jobid, self.procs[jobid]._interface.estimated_memory_gb, self.procs[jobid]._interface.num_threads)) - if self.procs[jobid]._interface.estimated_memory <= free_memory and \ + if self.procs[jobid]._interface.estimated_memory_gb <= free_memory and \ self.procs[jobid]._interface.num_threads <= free_processors: logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid)) executing_now.append(self.procs[jobid]) @@ -236,7 +226,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self.proc_done[jobid] = True self.proc_pending[jobid] = True - free_memory -= self.procs[jobid]._interface.estimated_memory + free_memory -= self.procs[jobid]._interface.estimated_memory_gb free_processors -= self.procs[jobid]._interface.num_threads # Send job to task manager and add to pending tasks diff --git a/nipype/pipeline/plugins/tests/test_base.py b/nipype/pipeline/plugins/tests/test_base.py index 616cb634a0..243ae195c2 100644 --- a/nipype/pipeline/plugins/tests/test_base.py +++ b/nipype/pipeline/plugins/tests/test_base.py @@ -38,5 +38,5 @@ def func(arg1): wf.add_nodes([funkynode]) wf.base_dir = '/tmp' -wf.run(plugin='ResourceMultiProc') +wf.run(plugin='MultiProc') ''' diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index f173a9b30c..0769781e8a 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -76,7 +76,7 @@ def test_callback_multiproc_normal(): wf.add_nodes([f_node]) wf.config['execution']['crashdump_dir'] = wf.base_dir wf.config['execution']['poll_sleep_duration'] = 2 - wf.run(plugin='ResourceMultiProc', plugin_args={'status_callback': so.callback}) + wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) assert_equal(len(so.statuses), 2) for (n, s) in so.statuses: yield assert_equal, n.name, 'f_node' @@ -94,7 +94,7 @@ def test_callback_multiproc_exception(): wf.add_nodes([f_node]) wf.config['execution']['crashdump_dir'] = wf.base_dir try: - wf.run(plugin='ResourceMultiProc', + wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) except: pass diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 887f4c50fb..1165702b1d 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -48,7 +48,7 @@ def test_run_multiproc(): pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 pipe.config['execution']['poll_sleep_duration'] = 2 - execgraph = pipe.run(plugin="ResourceMultiProc") + execgraph = pipe.run(plugin="MultiProc") names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] node = execgraph.nodes()[names.index('pipe.mod1')] result = node.get_output('output1') @@ -110,7 +110,7 @@ def find_metrics(nodes, last_node): node_finish = parse(nodes[j]['finish']) if node_start < x and node_finish > x: - total_memory[i] += nodes[j]['estimated_memory'] + total_memory[i] += nodes[j]['estimated_memory_gb'] total_threads[i] += nodes[j]['num_threads'] start_index = j @@ -138,10 +138,10 @@ def test_do_not_use_more_memory_then_specified(): n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') - n1.interface.estimated_memory = 1 - n2.interface.estimated_memory = 1 - n3.interface.estimated_memory = 10 - n4.interface.estimated_memory = 1 + n1.interface.estimated_memory_gb = 1 + n2.interface.estimated_memory_gb = 1 + n3.interface.estimated_memory_gb = 10 + n4.interface.estimated_memory_gb = 1 pipe.connect(n1, 'output1', n2, 'input1') pipe.connect(n1, 'output1', n3, 'input1') @@ -149,7 +149,7 @@ def test_do_not_use_more_memory_then_specified(): pipe.connect(n3, 'output1', n4, 'input2') n1.inputs.input1 = 10 - pipe.run(plugin='ResourceMultiProc', + pipe.run(plugin='MultiProc', plugin_args={'memory': max_memory, 'status_callback': log_nodes_cb}) @@ -207,8 +207,8 @@ def test_do_not_use_more_threads_then_specified(): pipe.connect(n3, 'output1', n4, 'input2') n1.inputs.input1 = 10 pipe.config['execution']['poll_sleep_duration'] = 1 - pipe.run(plugin='ResourceMultiProc', plugin_args={'n_procs': max_threads, - 'status_callback': log_nodes_cb}) + pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, + 'status_callback': log_nodes_cb}) nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) #usage in every second diff --git a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py index 59f32e5560..cdba9da5b5 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py @@ -115,9 +115,9 @@ def run_multiproc_nondaemon_with_flag(nondaemon_flag): pipe.config['execution']['stop_on_first_crash'] = True - # execute the pipe using the ResourceMultiProc plugin with 2 processes and the non_daemon flag + # execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag # to enable child processes which start other multiprocessing jobs - execgraph = pipe.run(plugin="ResourceMultiProc", + execgraph = pipe.run(plugin="MultiProc", plugin_args={'n_procs': 2, 'non_daemon': nondaemon_flag}) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 93b012daf3..6142d8b153 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -161,7 +161,7 @@ def draw_memory_bar(start, total_duration, nodes, node_finish = parser.parse(nodes[j]['finish']) if node_start <= now and node_finish >= now: - memory[i] += nodes[j]['estimated_memory'] + memory[i] += nodes[j]['estimated_memory_gb'] if node_start > now: break now += datetime.timedelta(minutes=1) @@ -185,7 +185,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, colors=["#7070FF", "#4E4EB2", "#2D2D66", "#9B9BFF"]): ''' Generates a gantt chart in html showing the workflow execution based on a callback log file. - This script was intended to be used with the ResourceMultiprocPlugin. + This script was intended to be used with the MultiprocPlugin. The following code shows how to set up the workflow in order to generate the log file: # import logging @@ -201,8 +201,8 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, # #create workflow # workflow = ... - # workflow.run(plugin='ResourceMultiProc', - # plugin_args={'num_threads':8, 'memory':12, 'status_callback': log_nodes_cb}) + # workflow.run(plugin='MultiProc', + # plugin_args={'n_procs':8, 'memory':12, 'status_callback': log_nodes_cb}) # generate_gantt_chart('callback.log', 8) ''' From 0388305c74df036b728f756b23d7f53cf2a43b93 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 25 Feb 2016 12:36:31 -0500 Subject: [PATCH 30/78] Added checks for python deps and added method using builtin std library functions to get system memory --- .../interfaces/tests/test_runtime_profiler.py | 10 +++ nipype/pipeline/plugins/multiproc.py | 63 ++++++++++++++----- .../pipeline/plugins/tests/test_multiproc.py | 4 +- 3 files changed, 58 insertions(+), 19 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 6912ed1cca..5b4c8fa230 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -10,6 +10,15 @@ import unittest from nipype.interfaces.base import traits, CommandLine, CommandLineInputSpec +try: + import psutil + import memory_profiler + run_profiler = True + skip_profile_msg = 'Run profiler tests' +except ImportError as exc: + skip_profile_msg = 'Missing python packages for runtime profiling, skipping...\n'\ + 'Error: %s' % exc + run_profiler = False # UseResources inputspec class UseResourcesInputSpec(CommandLineInputSpec): @@ -151,6 +160,7 @@ def _run_workflow(self): return finish_str # Test resources were used as expected + @unittest.skipIf(run_profiler == False, skip_profile_msg) def test_wf_logfile(self): ''' Test runtime profiler correctly records workflow RAM/CPUs consumption diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 1af2b8bdba..47bbcaa6d5 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -9,13 +9,13 @@ # Import packages from multiprocessing import Process, Pool, cpu_count, pool from traceback import format_exception +import os import sys import numpy as np from copy import deepcopy from ..engine import MapNode from ...utils.misc import str2bool -import psutil from ... import logging import semaphore_singleton from .base import (DistributedPluginBase, report_crash) @@ -78,6 +78,34 @@ def release_lock(args): semaphore_singleton.semaphore.release() +# Get total system RAM +def get_system_total_memory_gb(): + """Function to get the total RAM of the running system in GB + """ + + # Import packages + import os + import sys + + # Get memory + if 'linux' in sys.platform: + with open('/proc/meminfo', 'r') as f_in: + meminfo_lines = f_in.readlines() + mem_total_line = [line for line in meminfo_lines \ + if 'MemTotal' in line][0] + mem_total = float(mem_total_line.split()[1]) + memory_gb = mem_total/(1024.0**2) + elif 'darwin' in sys.platform: + mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1] + memory_gb = float(mem_str)/(1024.0**3) + else: + err_msg = 'System platform: %s is not supported' + raise Exception(err_msg) + + # Return memory + return memory_gb + + class MultiProcPlugin(DistributedPluginBase): """Execute workflow with multiprocessing, not sending more jobs at once than the system can support. @@ -102,14 +130,16 @@ class MultiProcPlugin(DistributedPluginBase): """ def __init__(self, plugin_args=None): + # Init variables and instance attributes super(MultiProcPlugin, self).__init__(plugin_args=plugin_args) self._taskresult = {} self._taskid = 0 non_daemon = True self.plugin_args = plugin_args self.processors = cpu_count() - memory = psutil.virtual_memory() - self.memory = float(memory.total) / (1024.0**3) + self.memory_gb = get_system_total_memory_gb() + + # Check plugin args if self.plugin_args: if 'non_daemon' in self.plugin_args: non_daemon = plugin_args['non_daemon'] @@ -117,7 +147,7 @@ def __init__(self, plugin_args=None): self.processors = self.plugin_args['n_procs'] if 'memory' in self.plugin_args: self.memory = self.plugin_args['memory'] - + # Instantiate different thread pools for non-daemon processes if non_daemon: # run the execution using the non-daemon pool subclass self.pool = NonDaemonPool(processes=self.processors) @@ -172,40 +202,39 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): jobids = np.flatnonzero((self.proc_pending == True) & \ (self.depidx.sum(axis=0) == 0).__array__()) - #check available system resources by summing all threads and memory used - busy_memory = 0 + # Check available system resources by summing all threads and memory used + busy_memory_gb = 0 busy_processors = 0 for jobid in jobids: - busy_memory += self.procs[jobid]._interface.estimated_memory_gb + busy_memory_gb += self.procs[jobid]._interface.estimated_memory_gb busy_processors += self.procs[jobid]._interface.num_threads - free_memory = self.memory - busy_memory + free_memory_gb = self.memory_gb - busy_memory_gb free_processors = self.processors - busy_processors - #check all jobs without dependency not run + # Check all jobs without dependency not run jobids = np.flatnonzero((self.proc_done == False) & \ (self.depidx.sum(axis=0) == 0).__array__()) - #sort jobs ready to run first by memory and then by number of threads - #The most resource consuming jobs run first + # Sort jobs ready to run first by memory and then by number of threads + # The most resource consuming jobs run first jobids = sorted(jobids, key=lambda item: (self.procs[item]._interface.estimated_memory_gb, self.procs[item]._interface.num_threads)) logger.debug('Free memory (GB): %d, Free processors: %d', - free_memory, free_processors) - + free_memory_gb, free_processors) - #while have enough memory and processors for first job - #submit first job on the list + # While have enough memory and processors for first job + # Submit first job on the list for jobid in jobids: logger.debug('Next Job: %d, memory (GB): %d, threads: %d' \ % (jobid, self.procs[jobid]._interface.estimated_memory_gb, self.procs[jobid]._interface.num_threads)) - if self.procs[jobid]._interface.estimated_memory_gb <= free_memory and \ + if self.procs[jobid]._interface.estimated_memory_gb <= free_memory_gb and \ self.procs[jobid]._interface.num_threads <= free_processors: logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid)) executing_now.append(self.procs[jobid]) @@ -226,7 +255,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self.proc_done[jobid] = True self.proc_pending[jobid] = True - free_memory -= self.procs[jobid]._interface.estimated_memory_gb + free_memory_gb -= self.procs[jobid]._interface.estimated_memory_gb free_processors -= self.procs[jobid]._interface.num_threads # Send job to task manager and add to pending tasks diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 1165702b1d..264753c8a0 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -3,13 +3,13 @@ from tempfile import mkdtemp from shutil import rmtree from multiprocessing import cpu_count -import psutil import nipype.interfaces.base as nib from nipype.utils import draw_gantt_chart from nipype.testing import assert_equal import nipype.pipeline.engine as pe from nipype.pipeline.plugins.callback_log import log_nodes_cb +from nipype.pipeline.plugins.multiproc import get_system_total_memory_gb class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') @@ -222,7 +222,7 @@ def test_do_not_use_more_threads_then_specified(): yield assert_equal, result, True, "using more threads than specified" - max_memory = psutil.virtual_memory().total / (1024*1024) + max_memory = get_system_total_memory_gb() result = True for m in memory: if m > max_memory: From 1e4ce5b98b39c03f4a6a7096f510a868cf782689 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 25 Feb 2016 15:28:32 -0500 Subject: [PATCH 31/78] Fixed exception formatting and import error --- nipype/pipeline/plugins/__init__.py | 1 + nipype/pipeline/plugins/multiproc.py | 2 +- nipype/utils/draw_gantt_chart.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py index 45a8c40e8f..0bf1a8d2f5 100644 --- a/nipype/pipeline/plugins/__init__.py +++ b/nipype/pipeline/plugins/__init__.py @@ -19,3 +19,4 @@ from .slurmgraph import SLURMGraphPlugin from .callback_log import log_nodes_cb +from . import semaphore_singleton diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 47bbcaa6d5..503a9d8c2a 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -17,7 +17,7 @@ from ..engine import MapNode from ...utils.misc import str2bool from ... import logging -import semaphore_singleton +from nipype.pipeline.plugins import semaphore_singleton from .base import (DistributedPluginBase, report_crash) # Init logger diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 6142d8b153..81033f9022 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -23,7 +23,7 @@ def log_to_json(logfile): try: y = json.loads(i) l.append(y) - except Exception, e: + except Exception: pass lines = l From ace73686a74328a6769fe51b015aabda8b61f69b Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 1 Mar 2016 14:47:17 -0500 Subject: [PATCH 32/78] Removed 'Error' from logger info message when memory_profiler or psutil are not found --- nipype/interfaces/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 403dba2bea..1ac3894622 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1292,7 +1292,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): runtime_profile = True except ImportError as exc: logger.info('Unable to import packages needed for runtime profiling. '\ - 'Turning off runtime profiler.\nError: %s' % exc) + 'Turning off runtime profiler. Reason: %s' % exc) runtime_profile = False # Init variables From a515c77aec14ee914fb68a30358e9fb503b71ebc Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 8 Mar 2016 13:46:05 -0500 Subject: [PATCH 33/78] Added more code for debugging runtime profiler --- nipype/interfaces/base.py | 46 ++- .../interfaces/tests/test_runtime_profiler.py | 274 +++++++++++++++++- nipype/interfaces/tests/use_resources | 6 +- nipype/interfaces/utility.py | 71 ++++- 4 files changed, 365 insertions(+), 32 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 1ac3894622..04c45747a0 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1205,7 +1205,7 @@ def _read(self, drain): # Get number of threads for process -def _get_num_threads(proc): +def _get_num_threads(proc, log_flg=False): """Function to get the number of threads a process is using Parameters @@ -1221,12 +1221,26 @@ def _get_num_threads(proc): # Import packages import psutil + import logging # Init variables num_threads = proc.num_threads() + if log_flg: + from CPAC.utils.utils import setup_logger + logger = setup_logger('memory_profiler', '/home/dclark/memory_profiler.log', + logging.INFO, to_screen=False) + try: num_children = len(proc.children()) + if log_flg: + logger.debug('len(proc.children()): %d' % num_children) + logger.debug('proc.id: %s' % str(proc.pid)) for child in proc.children(): + if log_flg: + logger.debug('child.pid: %d' % child.pid) + logger.debug('child.threads(): %s' % str(child.threads())) + logger.debug('child.num_threads(): %d' % child.num_threads()) + logger.debug('len(child.children()): %d' % len(child.children())) num_threads = max(num_threads, num_children, child.num_threads(), len(child.children())) except psutil.NoSuchProcess: @@ -1237,19 +1251,17 @@ def _get_num_threads(proc): # Get max resources used for process -def _get_max_resources_used(proc, mem_mb, num_threads, poll=False): +def get_max_resources_used(pid, mem_mb, num_threads, log_flg=False): """Function to get the RAM and threads usage of a process Paramters --------- - proc : subprocess.Popen instance - the process to profile + pid : integer + the process ID of process to profile mem_mb : float the high memory watermark so far during process execution (in MB) num_threads: int the high thread watermark so far during process execution - poll : boolean - whether to poll the process or not Returns ------- @@ -1264,10 +1276,8 @@ def _get_max_resources_used(proc, mem_mb, num_threads, poll=False): import psutil try: - mem_mb = max(mem_mb, _get_memory(proc.pid, include_children=True)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(proc.pid))) - if poll: - proc.poll() + mem_mb = max(mem_mb, _get_memory(pid, include_children=True, log_flg=log_flg)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(pid), log_flg=log_flg)) except Exception as exc: iflogger.info('Could not get resources used by process. Error: %s'\ % exc) @@ -1331,6 +1341,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Init variables for memory profiling mem_mb = -1 num_threads = -1 + interval = .5 if output == 'stream': streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] @@ -1350,9 +1361,10 @@ def _process(drain=0): while proc.returncode is None: if runtime_profile: mem_mb, num_threads = \ - _get_max_resources_used(proc, mem_mb, num_threads) + get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() _process() + time.sleep(interval) _process(drain=1) # collect results, merge and return @@ -1369,7 +1381,9 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - _get_max_resources_used(proc, mem_mb, num_threads, poll=True) + get_max_resources_used(proc.pid, mem_mb, num_threads) + proc.poll() + time.sleep(interval) stdout, stderr = proc.communicate() if stdout and isinstance(stdout, bytes): try: @@ -1389,7 +1403,9 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - _get_max_resources_used(proc, mem_mb, num_threads, poll=True) + get_max_resources_used(proc.pid, mem_mb, num_threads) + proc.poll() + time.sleep(interval) ret_code = proc.wait() stderr.flush() stdout.flush() @@ -1400,7 +1416,9 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - _get_max_resources_used(proc, mem_mb, num_threads, poll=True) + get_max_resources_used(proc.pid, mem_mb, num_threads) + proc.poll() + time.sleep(interval) proc.communicate() result['stdout'] = [] result['stderr'] = [] diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 5b4c8fa230..e11a95e25d 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -53,6 +53,60 @@ class UseResources(CommandLine): _cmd = exec_path +# Spin multiple processors +def use_resources(num_procs, num_gb): + ''' + Function to execute multiple use_gb_ram functions in parallel + ''' + + # Function to occupy GB of memory + def _use_gb_ram(num_gb): + ''' + Function to consume GB of memory + ''' + + # Eat 1 GB of memory for 1 second + gb_str = ' ' * int(num_gb*1024.0**3) + + # Spin CPU + ctr = 0 + while ctr < 50e6: + ctr += 1 + + # Clear memory + del ctr + del gb_str + + # Import packages + import logging + from multiprocessing import Process + + from threading import Thread + + # Init variables + num_gb = float(num_gb) + # Init variables + #num_threads = proc.num_threads() + from CPAC.utils.utils import setup_logger + # Build proc list + proc_list = [] + for idx in range(num_procs): + #proc = Thread(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx)) + proc = Process(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx)) + proc_list.append(proc) + + logger = setup_logger('memory_profiler', '/home/dclark/memory_profiler.log', + logging.DEBUG, to_screen=False) + # Run multi-threaded + print 'Using %.3f GB of memory over %d processors...' % (num_gb, num_procs) + for idx, proc in enumerate(proc_list): + proc.start() + logger.debug('Starting PID: %d' % proc.pid) + + for proc in proc_list: + proc.join() + + # Test case for the run function class RuntimeProfilerTestCase(unittest.TestCase): ''' @@ -81,13 +135,18 @@ def setUp(self): a unittest.TestCase-inherited class ''' - self.num_gb = 1 - self.num_procs = 2 + # Init parameters + # Input RAM GB to occupy + self.num_gb= .75 + # Input number of processors + self.num_procs = 1 + # Acceptable percent error for memory profiled against input + self.mem_err_percent = 5 # Test node - def _run_workflow(self): + def _run_cmdline_workflow(self, num_gb, num_procs): ''' - Function to run the use_resources script in a nipype workflow + Function to run the use_resources cmdline script in a nipype workflow and return the runtime stats recorded by the profiler Parameters @@ -113,8 +172,6 @@ def _run_workflow(self): from nipype.pipeline.plugins.callback_log import log_nodes_cb # Init variables - num_gb = self.num_gb - num_procs = self.num_procs base_dir = tempfile.mkdtemp() log_file = os.path.join(base_dir, 'callback.log') @@ -125,7 +182,7 @@ def _run_workflow(self): logger.addHandler(handler) # Declare workflow - wf = pe.Workflow(name='test_runtime_prof') + wf = pe.Workflow(name='test_runtime_prof_cmd') wf.base_dir = base_dir # Input node @@ -159,21 +216,140 @@ def _run_workflow(self): # Return runtime stats return finish_str + # Test node + def _run_function_workflow(self, num_gb, num_procs): + ''' + Function to run the use_resources() function in a nipype workflow + and return the runtime stats recorded by the profiler + + Parameters + ---------- + self : RuntimeProfileTestCase + a unittest.TestCase-inherited class + + Returns + ------- + finish_str : string + a json-compatible dictionary string containing the runtime + statistics of the nipype node that used system resources + ''' + + # Import packages + import logging + import os + import shutil + import tempfile + + import nipype.pipeline.engine as pe + import nipype.interfaces.utility as util + from nipype.pipeline.plugins.callback_log import log_nodes_cb + + # Init variables + base_dir = tempfile.mkdtemp() + log_file = os.path.join(base_dir, 'callback.log') + + # Init logger + logger = logging.getLogger('callback') + logger.setLevel(logging.DEBUG) + handler = logging.FileHandler(log_file) + logger.addHandler(handler) + + # Declare workflow + wf = pe.Workflow(name='test_runtime_prof_func') + wf.base_dir = base_dir + + # Input node + input_node = pe.Node(util.IdentityInterface(fields=['num_gb', + 'num_procs']), + name='input_node') + input_node.inputs.num_gb = num_gb + input_node.inputs.num_procs = num_procs + + # Resources used node + resource_node = pe.Node(util.Function(input_names=['num_procs', + 'num_gb'], + output_names=[], + function=use_resources), + name='resource_node') + resource_node.interface.estimated_memory_gb = num_gb + resource_node.interface.num_threads = num_procs + + # Connect workflow + wf.connect(input_node, 'num_gb', resource_node, 'num_gb') + wf.connect(input_node, 'num_procs', resource_node, 'num_procs') + + # Run workflow + plugin_args = {'n_procs' : num_procs, + 'memory' : num_gb, + 'status_callback' : log_nodes_cb} + wf.run(plugin='MultiProc', plugin_args=plugin_args) + + # Get runtime stats from log file + finish_str = open(log_file, 'r').readlines()[1].rstrip('\n') + + # Delete wf base dir + shutil.rmtree(base_dir) + + # Return runtime stats + return finish_str + + # Test resources were used as expected in cmdline interface + @unittest.skipIf(run_profiler == False, skip_profile_msg) + def test_cmdline_profiling(self): + ''' + Test runtime profiler correctly records workflow RAM/CPUs consumption + from a cmdline function + ''' + + # Import packages + import json + import numpy as np + + # Init variables + num_gb = self.num_gb + num_procs = self.num_procs + + # Run workflow and get stats + finish_str = self._run_cmdline_workflow(num_gb, num_procs) + # Get runtime stats as dictionary + node_stats = json.loads(finish_str) + + # Read out runtime stats + runtime_gb = float(node_stats['runtime_memory_gb']) + runtime_procs = int(node_stats['runtime_threads']) + + # Get margin of error for RAM GB + allowed_gb_err = (self.mem_err_percent/100.0)*num_gb + runtime_gb_err = np.abs(runtime_gb-num_gb) + + # Error message formatting + mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ + 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) + procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ + % (num_procs, runtime_procs) + + # Assert runtime stats are what was input + self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) + self.assertEqual(num_procs, runtime_procs, msg=procs_err) + # Test resources were used as expected @unittest.skipIf(run_profiler == False, skip_profile_msg) - def test_wf_logfile(self): + def test_function_profiling(self): ''' Test runtime profiler correctly records workflow RAM/CPUs consumption + from a python function ''' # Import packages import json + import numpy as np # Init variables - places = 1 + num_gb = self.num_gb + num_procs = self.num_procs # Run workflow and get stats - finish_str = self._run_workflow() + finish_str = self._run_function_workflow(num_gb, num_procs) # Get runtime stats as dictionary node_stats = json.loads(finish_str) @@ -181,16 +357,82 @@ def test_wf_logfile(self): runtime_gb = float(node_stats['runtime_memory_gb']) runtime_procs = int(node_stats['runtime_threads']) + # Get margin of error for RAM GB + allowed_gb_err = (self.mem_err_percent/100.0)*num_gb + runtime_gb_err = np.abs(runtime_gb-num_gb) + # Error message formatting - mem_err = 'Input memory: %.5f is not within %d places of runtime '\ - 'memory: %.5f' % (self.num_gb, places, runtime_gb) + mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ + 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ - % (self.num_procs, runtime_procs) + % (num_procs, runtime_procs) # Assert runtime stats are what was input - self.assertAlmostEqual(self.num_gb, runtime_gb, places=places, - msg=mem_err) - self.assertEqual(self.num_procs, runtime_procs, msg=procs_err) + self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) + self.assertEqual(num_procs, runtime_procs, msg=procs_err) + + # Collect stats for range of num_threads and memory amount + def _collect_range_runtime_stats(self): + ''' + Function to collect a range of runtime stats + ''' + + # Import packages + import json + import numpy as np + import pandas as pd + + # Init variables + num_procs_range = 8 + ram_gb_range = 10.0 + ram_gb_step = 0.25 + dict_list = [] + + # Iterate through all combos + for num_procs in np.arange(1, num_procs_range+1, 1): + for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): + # Cmd-level + cmd_fin_str = self._run_cmdline_workflow(num_gb, num_procs) + cmd_node_stats = json.loads(cmd_fin_str) + cmd_runtime_procs = int(cmd_node_stats['runtime_threads']) + cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) + + # Func-level + func_fin_str = self._run_function_workflow(num_gb, num_procs) + func_node_stats = json.loads(func_fin_str) + func_runtime_procs = int(func_node_stats['runtime_threads']) + func_runtime_gb = float(func_node_stats['runtime_memory_gb']) + + # Calc errors + cmd_procs_err = cmd_runtime_procs - num_procs + cmd_gb_err = cmd_runtime_gb - num_gb + func_procs_err = func_runtime_procs - num_procs + func_gb_err = func_runtime_gb - num_gb + + # Node dictionary + results_dict = {'input_procs' : num_procs, + 'input_gb' : num_gb, + 'cmd_runtime_procs' : cmd_runtime_procs, + 'cmd_runtime_gb' : cmd_runtime_gb, + 'func_runtime_procs' : func_runtime_procs, + 'func_runtime_gb' : func_runtime_gb, + 'cmd_procs_err' : cmd_procs_err, + 'cmd_gb_err' : cmd_gb_err, + 'func_procs_err' : func_procs_err, + 'func_gb_err' : func_gb_err} + # Append to list + dict_list.append(results_dict) + + # Create dataframe + runtime_results_df = pd.DataFrame(dict_list) + + # Return dataframe + return runtime_results_df + + def test_write_df_to_csv(self): + df = self._collect_range_runtime_stats() + df.to_csv('/home/dclark/runtime_results.csv') + #self.assertEqual(1, 1) # Command-line run-able unittest module diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index a12eb6ed24..b9483c2a27 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -21,7 +21,7 @@ def use_gb_ram(num_gb): # Spin CPU ctr = 0 - while ctr < 100e6: + while ctr < 10e6: ctr+= 1 # Clear memory @@ -34,6 +34,7 @@ if __name__ == '__main__': # Import packages import argparse + from threading import Thread from multiprocessing import Process # Init argparser @@ -61,3 +62,6 @@ if __name__ == '__main__': print 'Using %.3f GB of memory over %d processors...' % (num_gb, num_procs) for proc in proc_list: proc.start() + + for proc in proc_list: + proc.join() diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 37883d4e5c..7815074a82 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -440,16 +440,85 @@ def _add_output_traits(self, base): return base def _run_interface(self, runtime): + # Get workflow logger for runtime profile error reporting + from nipype import logging + logger = logging.getLogger('workflow') + + # Create function handle function_handle = create_function_from_source(self.inputs.function_str, self.imports) + # Wrapper for running function handle in multiprocessing.Process + # Can catch exceptions and report output via multiprocessing.Queue + def _function_handle_wrapper(queue, **kwargs): + try: + out = function_handle(**kwargs) + queue.put(out) + except Exception as exc: + queue.put(exc) + + # Get function args args = {} for name in self._input_names: value = getattr(self.inputs, name) if isdefined(value): args[name] = value - out = function_handle(**args) + # Runtime profiler on if dependecies available + try: + import memory_profiler + import psutil + from nipype.interfaces.base import get_max_resources_used + import multiprocessing + runtime_profile = True + except ImportError as exc: + logger.info('Unable to import packages needed for runtime profiling. '\ + 'Turning off runtime profiler. Reason: %s' % exc) + runtime_profile = False + + # Profile resources if set + #runtime_profile=False + if runtime_profile: + # Init communication queue and proc objs + queue = multiprocessing.Queue() + proc = multiprocessing.Process(target=_function_handle_wrapper, + args=(queue,), kwargs=args) + + # Init memory and threads before profiling + mem_mb = -1 + num_threads = -1 +# if function_handle.__name__ == 'use_resources': +# log_flg = True +# else: +# log_flg = False + log_flg = False + # Start process and profile while it's alive + proc.start() + while proc.is_alive(): + mem_mb, num_threads = \ + get_max_resources_used(proc.pid, mem_mb, num_threads, log_flg=log_flg) + + # Get result from process queue + out = queue.get() + # If it is an exception, raise it + if isinstance(out, Exception): + raise out + +# proc = (function_handle, (), args) +# num_threads = 1 +# print 'function_handle: ', function_handle.__name__ +# if function_handle.__name__ == 'use_resources': +# log_flg = True +# else: +# log_flg = False +# mem_mb, out = \ +# memory_profiler.memory_usage(proc, include_children=True, max_usage=True, retval=True, log_flg=log_flg) +# mem_mb = mem_mb[0] + # Function ran successfully, populate runtime stats + setattr(runtime, 'runtime_memory_gb', mem_mb/1024.0) + setattr(runtime, 'runtime_threads', num_threads) + else: + out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out From e1d19cbdf10b18a3e9bce223b5824f37fe025ea8 Mon Sep 17 00:00:00 2001 From: carolFrohlich Date: Tue, 8 Mar 2016 17:17:56 -0500 Subject: [PATCH 34/78] improve thread draw algorithm --- nipype/utils/draw_gantt_chart.py | 222 ++++++++++++++++--------------- 1 file changed, 117 insertions(+), 105 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 81033f9022..72520910fd 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -4,50 +4,118 @@ callback_log.log_nodes_cb() """ +# Import packages # Import packages import json from dateutil import parser import datetime import random +import pandas as pd +import dateutil +from collections import OrderedDict -def log_to_json(logfile): - result = [] +def log_to_events(logfile): + events = [] with open(logfile, 'r') as content: - #read file separating each line content = content.read() lines = content.split('\n') - l = [] - for i in lines: + + for l in lines: + event = None try: - y = json.loads(i) - l.append(y) - except Exception: + event = json.loads(l) + except Exception, e: pass - lines = l + if not event: continue + + if 'start' in event: + event['type'] = 'start' + event['time'] = event['start'] + else: + event['type'] = 'finish' + event['time'] = event['finish'] + + events.append(event) + return events + +def log_to_dict(logfile): + + #keep track of important vars + nodes = [] #all the parsed nodes + unifinished_nodes = [] #all start nodes that dont have a finish yet + + with open(logfile, 'r') as content: + + #read file separating each line + content = content.read() + lines = content.split('\n') - last_node = [ x for x in lines if x.has_key('finish')][-1] + for l in lines: + #try to parse each line and transform in a json dict. + #if the line has a bad format, just skip + node = None + try: + node = json.loads(l) + except Exception, e: + pass - for i, line in enumerate(lines): - #get first start it finds - if not line.has_key('start'): + if not node: continue - #fint the end node for that start - for j in range(i+1, len(lines)): - if lines[j].has_key('finish'): - if lines[j]['id'] == line['id'] and \ - lines[j]['name'] == line['name']: - line['finish'] = lines[j]['finish'] - line['duration'] = (parser.parse(line['finish']) - \ - parser.parse(line['start'])).total_seconds() - result.append(line) + #if it is a start node, add to unifinished nodes + if 'start' in node: + node['start'] = parser.parse(node['start']) + unifinished_nodes.append(node) + + #if it is end node, look in uninished nodes for matching start + #remove from unifinished list and add to node list + elif 'finish' in node: + node['finish'] = parser.parse(node['finish']) + #because most nodes are small, we look backwards in the unfinished list + for s in range(len(unifinished_nodes)): + aux = unifinished_nodes[s] + #found the end for node start, copy over info + if aux['id'] == node['id'] and aux['name'] == node['name'] and aux['start'] < node['finish']: + node['start'] = aux['start'] + node['duration'] = (node['finish'] - node['start']).total_seconds() + + unifinished_nodes.remove(aux) + nodes.append(node) break - return result, last_node - + #finished parsing + #assume nodes without finish didn't finish running. + #set their finish to last node run + last_node = nodes[-1] + for n in unifinished_nodes: + n['finish'] = last_node['finish'] + n['duration'] = (n['finish'] - n['start']).total_seconds() + nodes.append(n) + + return nodes, last_node + +def calculate_resources(events, resource): + res = OrderedDict() + for event in events: + all_res = 0 + if event['type'] == "start": + all_res =+ int(float(event[resource])) + current_time = event['start']; + elif event['type'] == "finish": + all_res+ int(float(event[resource])) + current_time = event['finish']; + + res[current_time] = all_res + + timestamps = [dateutil.parser.parse(ts) for ts in res.keys()] + time_series = pd.Series(res.values(), timestamps) + interp_seq = pd.date_range(time_series.index[0], time_series.index[-1], freq='S') + interp_time_series = time_series.reindex(interp_seq) + interp_time_series = interp_time_series.fillna(method='ffill') + return interp_time_series #total duration in seconds def draw_lines(start, total_duration, minute_scale, scale): @@ -73,8 +141,8 @@ def draw_nodes(start, nodes, cores, scale, colors): end_times = [datetime.datetime(start.year, start.month, start.day, start.hour, start.minute, start.second) for x in range(cores)] for node in nodes: - node_start = parser.parse(node['start']) - node_finish = parser.parse(node['finish']) + node_start = node['start'] + node_finish = node['finish'] offset = ((node_start - start).total_seconds() / 60) * scale + 220 scale_duration = (node['duration'] / 60) * scale if scale_duration < 5: @@ -93,88 +161,34 @@ def draw_nodes(start, nodes, cores, scale, colors): node_finish.second) #end_times[j]+= datetime.timedelta(microseconds=node_finish.microsecond) break - - color = random.choice(colors) - new_node = "
"; + color = random.choice(colors) + new_node = "
"; result += new_node return result - -def draw_thread_bar(start, total_duration, nodes, space_between_minutes, minute_scale): +def draw_thread_bar(threads,space_between_minutes, minute_scale): result = "

Threads

" - total = total_duration/60 - thread = [0 for x in range(total)] - - now = start - - #calculate nuber of threads in every second - for i in range(total): - node_start = None - node_finish = None - - for j in range(i, len(nodes)): - node_start = parser.parse(nodes[j]['start']) - node_finish = parser.parse(nodes[j]['finish']) - - if node_start <= now and node_finish >= now: - thread[i] += nodes[j]['num_threads'] - if node_start > now: - break - now += datetime.timedelta(minutes=1) - - - #draw thread bar scale = float(space_between_minutes/float(minute_scale)) - - for i in range(len(thread)): - width = thread[i] * 10 - t = (i*scale*minute_scale) + 220 - bar = "
" + space_between_minutes = float(space_between_minutes/60.0) + for i in range(len(threads)): + width = threads[i] * 10 + t = (float(i*scale*minute_scale)/60.0) + 220 + bar = "
" result += bar return result - -def draw_memory_bar(start, total_duration, nodes, - space_between_minutes, minute_scale): +def draw_memory_bar(memory, space_between_minutes, minute_scale): result = "

Memory

" - total = total_duration/60 - memory = [0 for x in range(total)] - - now = start - - #calculate nuber of threads in every second - for i in range(total): - node_start = None - node_finish = None - - for j in range(i, len(nodes)): - node_start = parser.parse(nodes[j]['start']) - node_finish = parser.parse(nodes[j]['finish']) - - if node_start <= now and node_finish >= now: - memory[i] += nodes[j]['estimated_memory_gb'] - if node_start > now: - break - now += datetime.timedelta(minutes=1) - - - #draw thread bar scale = float(space_between_minutes/float(minute_scale)) + space_between_minutes = float(space_between_minutes/60.0) for i in range(len(memory)): width = memory[i] * 10 - t = (i*scale*minute_scale) + 220 - bar = "
" + t = (float(i*scale*minute_scale)/60.0) + 220 + bar = "
" result += bar return result @@ -207,7 +221,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, # generate_gantt_chart('callback.log', 8) ''' - result, last_node = log_to_json(logfile) + result, last_node = log_to_dict(logfile) scale = space_between_minutes #add the html header @@ -262,24 +276,22 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, #create the header of the report with useful information - start = parser.parse(result[0]['start']) - duration = int((parser.parse(last_node['finish']) - start).total_seconds()) + start = result[0]['start'] + duration = (last_node['finish'] - start).total_seconds() - html_string += '

Start: '+ result[0]['start'] +'

' - html_string += '

Finish: '+ last_node['finish'] +'

' + html_string += '

Start: '+ result[0]['start'].strftime("%Y-%m-%d %H:%M:%S") +'

' + html_string += '

Finish: '+ last_node['finish'].strftime("%Y-%m-%d %H:%M:%S") +'

' html_string += '

Duration: '+ str(duration/60) +' minutes

' html_string += '

Nodes: '+str(len(result))+'

' html_string += '

Cores: '+str(cores)+'

' + result = log_to_events(logfile) + threads = calculate_resources(result, 'num_threads') + html_string += draw_thread_bar(threads, space_between_minutes, minute_scale) - #draw lines - html_string += draw_lines(start, duration, minute_scale, scale) - - #draw nodes - html_string += draw_nodes(start, result, cores, scale, colors) + memory = calculate_resources(result, 'estimated_memory_gb') + html_string += draw_memory_bar(memory, space_between_minutes, minute_scale) - #html_string += draw_thread_bar(start, duration, result, space_between_minutes, minute_scale) - #html_string += draw_memory_bar(start, duration, result, space_between_minutes, minute_scale) #finish html html_string+= ''' @@ -289,4 +301,4 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, #save file html_file = open(logfile +'.html', 'wb') html_file.write(html_string) - html_file.close() + html_file.close() \ No newline at end of file From 0fdc671cfd26c271535287d5ca19f90cd1f9b67a Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 9 Mar 2016 15:02:54 -0500 Subject: [PATCH 35/78] Wrote my own get memory function - seems to work much better --- nipype/interfaces/base.py | 53 +++++++- .../interfaces/tests/test_runtime_profiler.py | 127 +++++++++--------- nipype/interfaces/utility.py | 2 +- 3 files changed, 114 insertions(+), 68 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 04c45747a0..b543a56061 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1249,9 +1249,57 @@ def _get_num_threads(proc, log_flg=False): # Return the number of threads found return num_threads +def _get_num_ram_mb(pid, pyfunc=False): + """Function to get the RAM usage of a process and its children + + Parameters + ---------- + pid : integer + the PID of the process to get RAM usage of + pyfunc : boolean (optional); default=False + a flag to indicate if the process is a python function; + when Pythons are multithreaded via multiprocess or threading, + children functions include their own memory + parents. if this + is set, the parent memory will removed from children memories + + Reference: http://ftp.dev411.com/t/python/python-list/095thexx8g/multiprocessing-forking-memory-usage + + Returns + ------- + mem_mb : float + the memory RAM in MB utilized by the process PID + """ + + # Import packages + import psutil + + # Init variables + _MB = 1024.0**2 + + # Try block to protect against any dying processes in the interim + try: + # Init parent + parent = psutil.Process(pid) + # Get memory of parent + parent_mem = parent.memory_info().rss + mem_mb = parent_mem/_MB + + # Iterate through child processes + for child in parent.children(recursive=True): + child_mem = child.memory_info().rss + if pyfunc: + child_mem -= parent_mem + mem_mb += child_mem/_MB + + # Catch if process dies, return gracefully + except psutil.NoSuchProcess: + pass + + # Return memory + return mem_mb # Get max resources used for process -def get_max_resources_used(pid, mem_mb, num_threads, log_flg=False): +def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False, log_flg=False): """Function to get the RAM and threads usage of a process Paramters @@ -1276,7 +1324,8 @@ def get_max_resources_used(pid, mem_mb, num_threads, log_flg=False): import psutil try: - mem_mb = max(mem_mb, _get_memory(pid, include_children=True, log_flg=log_flg)) + #mem_mb = max(mem_mb, _get_memory(pid, include_children=True, log_flg=log_flg)) + mem_mb = max(mem_mb, _get_num_ram_mb(pid, pyfunc=pyfunc)) num_threads = max(num_threads, _get_num_threads(psutil.Process(pid), log_flg=log_flg)) except Exception as exc: iflogger.info('Could not get resources used by process. Error: %s'\ diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index e11a95e25d..56b0785067 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -101,7 +101,7 @@ def _use_gb_ram(num_gb): print 'Using %.3f GB of memory over %d processors...' % (num_gb, num_procs) for idx, proc in enumerate(proc_list): proc.start() - logger.debug('Starting PID: %d' % proc.pid) + #logger.debug('Starting PID: %d' % proc.pid) for proc in proc_list: proc.join() @@ -137,12 +137,72 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb= .75 + self.num_gb= 4 # Input number of processors self.num_procs = 1 # Acceptable percent error for memory profiled against input self.mem_err_percent = 5 + # ! Only used for benchmarking the profiler over a range of + # ! processors and RAM usage + # ! Requires a LOT of RAM and PROCS to be tested + def _collect_range_runtime_stats(self): + ''' + Function to collect a range of runtime stats + ''' + + # Import packages + import json + import numpy as np + import pandas as pd + + # Init variables + num_procs_range = 8 + ram_gb_range = 10.0 + ram_gb_step = 0.25 + dict_list = [] + + # Iterate through all combos + for num_procs in np.arange(1, num_procs_range+1, 1): + for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): + # Cmd-level + cmd_fin_str = self._run_cmdline_workflow(num_gb, num_procs) + cmd_node_stats = json.loads(cmd_fin_str) + cmd_runtime_procs = int(cmd_node_stats['runtime_threads']) + cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) + + # Func-level + func_fin_str = self._run_function_workflow(num_gb, num_procs) + func_node_stats = json.loads(func_fin_str) + func_runtime_procs = int(func_node_stats['runtime_threads']) + func_runtime_gb = float(func_node_stats['runtime_memory_gb']) + + # Calc errors + cmd_procs_err = cmd_runtime_procs - num_procs + cmd_gb_err = cmd_runtime_gb - num_gb + func_procs_err = func_runtime_procs - num_procs + func_gb_err = func_runtime_gb - num_gb + + # Node dictionary + results_dict = {'input_procs' : num_procs, + 'input_gb' : num_gb, + 'cmd_runtime_procs' : cmd_runtime_procs, + 'cmd_runtime_gb' : cmd_runtime_gb, + 'func_runtime_procs' : func_runtime_procs, + 'func_runtime_gb' : func_runtime_gb, + 'cmd_procs_err' : cmd_procs_err, + 'cmd_gb_err' : cmd_gb_err, + 'func_procs_err' : func_procs_err, + 'func_gb_err' : func_gb_err} + # Append to list + dict_list.append(results_dict) + + # Create dataframe + runtime_results_df = pd.DataFrame(dict_list) + + # Return dataframe + return runtime_results_df + # Test node def _run_cmdline_workflow(self, num_gb, num_procs): ''' @@ -371,69 +431,6 @@ def test_function_profiling(self): self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) self.assertEqual(num_procs, runtime_procs, msg=procs_err) - # Collect stats for range of num_threads and memory amount - def _collect_range_runtime_stats(self): - ''' - Function to collect a range of runtime stats - ''' - - # Import packages - import json - import numpy as np - import pandas as pd - - # Init variables - num_procs_range = 8 - ram_gb_range = 10.0 - ram_gb_step = 0.25 - dict_list = [] - - # Iterate through all combos - for num_procs in np.arange(1, num_procs_range+1, 1): - for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): - # Cmd-level - cmd_fin_str = self._run_cmdline_workflow(num_gb, num_procs) - cmd_node_stats = json.loads(cmd_fin_str) - cmd_runtime_procs = int(cmd_node_stats['runtime_threads']) - cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) - - # Func-level - func_fin_str = self._run_function_workflow(num_gb, num_procs) - func_node_stats = json.loads(func_fin_str) - func_runtime_procs = int(func_node_stats['runtime_threads']) - func_runtime_gb = float(func_node_stats['runtime_memory_gb']) - - # Calc errors - cmd_procs_err = cmd_runtime_procs - num_procs - cmd_gb_err = cmd_runtime_gb - num_gb - func_procs_err = func_runtime_procs - num_procs - func_gb_err = func_runtime_gb - num_gb - - # Node dictionary - results_dict = {'input_procs' : num_procs, - 'input_gb' : num_gb, - 'cmd_runtime_procs' : cmd_runtime_procs, - 'cmd_runtime_gb' : cmd_runtime_gb, - 'func_runtime_procs' : func_runtime_procs, - 'func_runtime_gb' : func_runtime_gb, - 'cmd_procs_err' : cmd_procs_err, - 'cmd_gb_err' : cmd_gb_err, - 'func_procs_err' : func_procs_err, - 'func_gb_err' : func_gb_err} - # Append to list - dict_list.append(results_dict) - - # Create dataframe - runtime_results_df = pd.DataFrame(dict_list) - - # Return dataframe - return runtime_results_df - - def test_write_df_to_csv(self): - df = self._collect_range_runtime_stats() - df.to_csv('/home/dclark/runtime_results.csv') - #self.assertEqual(1, 1) - # Command-line run-able unittest module if __name__ == '__main__': diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 7815074a82..d7f9008c95 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -496,7 +496,7 @@ def _function_handle_wrapper(queue, **kwargs): proc.start() while proc.is_alive(): mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, log_flg=log_flg) + get_max_resources_used(proc.pid, mem_mb, num_threads, pyfunc=True, log_flg=log_flg) # Get result from process queue out = queue.get() From 9ad5d24022523d5842708b5ceb33150925844214 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 10 Mar 2016 15:09:15 -0500 Subject: [PATCH 36/78] Restructured unittests and num_threads logic --- nipype/interfaces/base.py | 39 +++++--------- .../interfaces/tests/test_runtime_profiler.py | 54 +++++++++---------- nipype/interfaces/tests/use_resources | 5 +- nipype/interfaces/utility.py | 24 ++------- 4 files changed, 45 insertions(+), 77 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index b543a56061..bf03b9ee75 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1205,8 +1205,9 @@ def _read(self, drain): # Get number of threads for process -def _get_num_threads(proc, log_flg=False): +def _get_num_threads(proc): """Function to get the number of threads a process is using + NOTE: If Parameters ---------- @@ -1221,35 +1222,23 @@ def _get_num_threads(proc, log_flg=False): # Import packages import psutil - import logging # Init variables num_threads = proc.num_threads() - if log_flg: - from CPAC.utils.utils import setup_logger - logger = setup_logger('memory_profiler', '/home/dclark/memory_profiler.log', - logging.INFO, to_screen=False) + # Iterate through child processes and get number of their threads try: - num_children = len(proc.children()) - if log_flg: - logger.debug('len(proc.children()): %d' % num_children) - logger.debug('proc.id: %s' % str(proc.pid)) - for child in proc.children(): - if log_flg: - logger.debug('child.pid: %d' % child.pid) - logger.debug('child.threads(): %s' % str(child.threads())) - logger.debug('child.num_threads(): %d' % child.num_threads()) - logger.debug('len(child.children()): %d' % len(child.children())) - num_threads = max(num_threads, num_children, - child.num_threads(), len(child.children())) + for child in proc.children(recursive=True): + num_threads += child.num_threads() except psutil.NoSuchProcess: pass - # Return the number of threads found + # Return number of threads found return num_threads -def _get_num_ram_mb(pid, pyfunc=False): + +# Get ram usage of process +def _get_ram_mb(pid, pyfunc=False): """Function to get the RAM usage of a process and its children Parameters @@ -1298,8 +1287,9 @@ def _get_num_ram_mb(pid, pyfunc=False): # Return memory return mem_mb + # Get max resources used for process -def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False, log_flg=False): +def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): """Function to get the RAM and threads usage of a process Paramters @@ -1320,13 +1310,11 @@ def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False, log_flg=False """ # Import packages - from memory_profiler import _get_memory import psutil try: - #mem_mb = max(mem_mb, _get_memory(pid, include_children=True, log_flg=log_flg)) - mem_mb = max(mem_mb, _get_num_ram_mb(pid, pyfunc=pyfunc)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(pid), log_flg=log_flg)) + mem_mb = max(mem_mb, _get_ram_mb(pid, pyfunc=pyfunc)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(pid))) except Exception as exc: iflogger.info('Could not get resources used by process. Error: %s'\ % exc) @@ -1346,7 +1334,6 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Default to profiling the runtime try: - import memory_profiler import psutil runtime_profile = True except ImportError as exc: diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 56b0785067..eedd933206 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -12,7 +12,6 @@ try: import psutil - import memory_profiler run_profiler = True skip_profile_msg = 'Run profiler tests' except ImportError as exc: @@ -28,9 +27,9 @@ class UseResourcesInputSpec(CommandLineInputSpec): # Init attributes num_gb = traits.Float(desc='Number of GB of RAM to use', - argstr = "-g %f") + argstr='-g %f') num_procs = traits.Int(desc='Number of processors to use', - argstr = "-p %d") + argstr='-p %d') # UseResources interface @@ -78,30 +77,21 @@ def _use_gb_ram(num_gb): del gb_str # Import packages - import logging - from multiprocessing import Process - from threading import Thread # Init variables num_gb = float(num_gb) - # Init variables - #num_threads = proc.num_threads() - from CPAC.utils.utils import setup_logger + # Build proc list proc_list = [] for idx in range(num_procs): - #proc = Thread(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx)) - proc = Process(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx)) + proc = Thread(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx)) proc_list.append(proc) - logger = setup_logger('memory_profiler', '/home/dclark/memory_profiler.log', - logging.DEBUG, to_screen=False) # Run multi-threaded - print 'Using %.3f GB of memory over %d processors...' % (num_gb, num_procs) + print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_procs) for idx, proc in enumerate(proc_list): proc.start() - #logger.debug('Starting PID: %d' % proc.pid) for proc in proc_list: proc.join() @@ -137,9 +127,9 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb= 4 - # Input number of processors - self.num_procs = 1 + self.num_gb = 6 + # Input number of sub-threads (not including parent threads) + self.num_threads = 7 # Acceptable percent error for memory profiled against input self.mem_err_percent = 5 @@ -367,30 +357,33 @@ def test_cmdline_profiling(self): # Init variables num_gb = self.num_gb - num_procs = self.num_procs + num_threads = self.num_threads # Run workflow and get stats - finish_str = self._run_cmdline_workflow(num_gb, num_procs) + finish_str = self._run_cmdline_workflow(num_gb, num_threads) # Get runtime stats as dictionary node_stats = json.loads(finish_str) # Read out runtime stats runtime_gb = float(node_stats['runtime_memory_gb']) - runtime_procs = int(node_stats['runtime_threads']) + runtime_threads = int(node_stats['runtime_threads']) # Get margin of error for RAM GB allowed_gb_err = (self.mem_err_percent/100.0)*num_gb runtime_gb_err = np.abs(runtime_gb-num_gb) + # Runtime threads should reflect shell-cmd thread, Python parent thread + # and Python sub-threads = 1 + 1 + num_threads + expected_runtime_threads = 1 + 1 + num_threads # Error message formatting mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) - procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ - % (num_procs, runtime_procs) + procs_err = 'Input threads: %d is not equal to runtime threads: %d' \ + % (expected_runtime_threads, runtime_threads) # Assert runtime stats are what was input self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) - self.assertEqual(num_procs, runtime_procs, msg=procs_err) + self.assertEqual(expected_runtime_threads, runtime_threads, msg=procs_err) # Test resources were used as expected @unittest.skipIf(run_profiler == False, skip_profile_msg) @@ -406,30 +399,33 @@ def test_function_profiling(self): # Init variables num_gb = self.num_gb - num_procs = self.num_procs + num_threads = self.num_threads # Run workflow and get stats - finish_str = self._run_function_workflow(num_gb, num_procs) + finish_str = self._run_function_workflow(num_gb, num_threads) # Get runtime stats as dictionary node_stats = json.loads(finish_str) # Read out runtime stats runtime_gb = float(node_stats['runtime_memory_gb']) - runtime_procs = int(node_stats['runtime_threads']) + runtime_threads = int(node_stats['runtime_threads']) # Get margin of error for RAM GB allowed_gb_err = (self.mem_err_percent/100.0)*num_gb runtime_gb_err = np.abs(runtime_gb-num_gb) + # Runtime threads should reflect Python parent thread + # and Python sub-threads = 1 + num_threads + expected_runtime_threads = 1 + num_threads # Error message formatting mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ - % (num_procs, runtime_procs) + % (expected_runtime_threads, runtime_threads) # Assert runtime stats are what was input self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) - self.assertEqual(num_procs, runtime_procs, msg=procs_err) + self.assertEqual(expected_runtime_threads, runtime_threads, msg=procs_err) # Command-line run-able unittest module diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index b9483c2a27..69ce6eb819 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -35,7 +35,6 @@ if __name__ == '__main__': # Import packages import argparse from threading import Thread - from multiprocessing import Process # Init argparser parser = argparse.ArgumentParser(description=__doc__) @@ -56,10 +55,10 @@ if __name__ == '__main__': # Build proc list proc_list = [] for idx in range(num_procs): - proc_list.append(Process(target=use_gb_ram, args=(num_gb/num_procs,))) + proc_list.append(Thread(target=use_gb_ram, args=(num_gb/num_procs,))) # Run multi-threaded - print 'Using %.3f GB of memory over %d processors...' % (num_gb, num_procs) + print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_procs) for proc in proc_list: proc.start() diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index d7f9008c95..0443013a9c 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -466,7 +466,6 @@ def _function_handle_wrapper(queue, **kwargs): # Runtime profiler on if dependecies available try: - import memory_profiler import psutil from nipype.interfaces.base import get_max_resources_used import multiprocessing @@ -483,37 +482,24 @@ def _function_handle_wrapper(queue, **kwargs): queue = multiprocessing.Queue() proc = multiprocessing.Process(target=_function_handle_wrapper, args=(queue,), kwargs=args) - + # Init memory and threads before profiling mem_mb = -1 num_threads = -1 -# if function_handle.__name__ == 'use_resources': -# log_flg = True -# else: -# log_flg = False - log_flg = False + # Start process and profile while it's alive proc.start() while proc.is_alive(): mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, pyfunc=True, log_flg=log_flg) - + get_max_resources_used(proc.pid, mem_mb, num_threads, + pyfunc=True) + # Get result from process queue out = queue.get() # If it is an exception, raise it if isinstance(out, Exception): raise out -# proc = (function_handle, (), args) -# num_threads = 1 -# print 'function_handle: ', function_handle.__name__ -# if function_handle.__name__ == 'use_resources': -# log_flg = True -# else: -# log_flg = False -# mem_mb, out = \ -# memory_profiler.memory_usage(proc, include_children=True, max_usage=True, retval=True, log_flg=log_flg) -# mem_mb = mem_mb[0] # Function ran successfully, populate runtime stats setattr(runtime, 'runtime_memory_gb', mem_mb/1024.0) setattr(runtime, 'runtime_threads', num_threads) From a52395ad00f80edbb5c322fc017297c2d84097cc Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 10 Mar 2016 15:11:51 -0500 Subject: [PATCH 37/78] Ignored sleeping --- nipype/interfaces/base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index bf03b9ee75..ca5d30a5eb 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1400,7 +1400,7 @@ def _process(drain=0): get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() _process() - time.sleep(interval) + #time.sleep(interval) _process(drain=1) # collect results, merge and return @@ -1419,7 +1419,7 @@ def _process(drain=0): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() - time.sleep(interval) + #time.sleep(interval) stdout, stderr = proc.communicate() if stdout and isinstance(stdout, bytes): try: @@ -1441,7 +1441,7 @@ def _process(drain=0): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() - time.sleep(interval) + #time.sleep(interval) ret_code = proc.wait() stderr.flush() stdout.flush() @@ -1454,7 +1454,7 @@ def _process(drain=0): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() - time.sleep(interval) + #time.sleep(interval) proc.communicate() result['stdout'] = [] result['stderr'] = [] From 2b0a6e25ff5a955a2ccbef3feee35e4215eb75ec Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 10 Mar 2016 20:35:09 +0000 Subject: [PATCH 38/78] Remove proc terminology from variable names --- .../interfaces/tests/test_runtime_profiler.py | 138 +++++++++--------- nipype/interfaces/tests/use_resources | 28 ++-- 2 files changed, 85 insertions(+), 81 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index eedd933206..0e9023128f 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -28,7 +28,7 @@ class UseResourcesInputSpec(CommandLineInputSpec): # Init attributes num_gb = traits.Float(desc='Number of GB of RAM to use', argstr='-g %f') - num_procs = traits.Int(desc='Number of processors to use', + num_threads = traits.Int(desc='Number of threads to use', argstr='-p %d') @@ -52,8 +52,8 @@ class UseResources(CommandLine): _cmd = exec_path -# Spin multiple processors -def use_resources(num_procs, num_gb): +# Spin multiple threads +def use_resources(num_threads, num_gb): ''' Function to execute multiple use_gb_ram functions in parallel ''' @@ -82,19 +82,19 @@ def _use_gb_ram(num_gb): # Init variables num_gb = float(num_gb) - # Build proc list - proc_list = [] - for idx in range(num_procs): - proc = Thread(target=_use_gb_ram, args=(num_gb/num_procs,), name=str(idx)) - proc_list.append(proc) + # Build thread list + thread_list = [] + for idx in range(num_threads): + thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), name=str(idx)) + thread_list.append(thread) # Run multi-threaded - print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_procs) - for idx, proc in enumerate(proc_list): - proc.start() + print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_threads) + for idx, thread in enumerate(thread_list): + thread.start() - for proc in proc_list: - proc.join() + for thread in thread_list: + thread.join() # Test case for the run function @@ -134,9 +134,9 @@ def setUp(self): self.mem_err_percent = 5 # ! Only used for benchmarking the profiler over a range of - # ! processors and RAM usage - # ! Requires a LOT of RAM and PROCS to be tested - def _collect_range_runtime_stats(self): + # ! RAM usage + # ! Requires a LOT of RAM to be tested + def _collect_range_runtime_stats(self, num_threads): ''' Function to collect a range of runtime stats ''' @@ -147,45 +147,43 @@ def _collect_range_runtime_stats(self): import pandas as pd # Init variables - num_procs_range = 8 ram_gb_range = 10.0 ram_gb_step = 0.25 dict_list = [] # Iterate through all combos - for num_procs in np.arange(1, num_procs_range+1, 1): - for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): - # Cmd-level - cmd_fin_str = self._run_cmdline_workflow(num_gb, num_procs) - cmd_node_stats = json.loads(cmd_fin_str) - cmd_runtime_procs = int(cmd_node_stats['runtime_threads']) - cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) - - # Func-level - func_fin_str = self._run_function_workflow(num_gb, num_procs) - func_node_stats = json.loads(func_fin_str) - func_runtime_procs = int(func_node_stats['runtime_threads']) - func_runtime_gb = float(func_node_stats['runtime_memory_gb']) - - # Calc errors - cmd_procs_err = cmd_runtime_procs - num_procs - cmd_gb_err = cmd_runtime_gb - num_gb - func_procs_err = func_runtime_procs - num_procs - func_gb_err = func_runtime_gb - num_gb - - # Node dictionary - results_dict = {'input_procs' : num_procs, - 'input_gb' : num_gb, - 'cmd_runtime_procs' : cmd_runtime_procs, - 'cmd_runtime_gb' : cmd_runtime_gb, - 'func_runtime_procs' : func_runtime_procs, - 'func_runtime_gb' : func_runtime_gb, - 'cmd_procs_err' : cmd_procs_err, - 'cmd_gb_err' : cmd_gb_err, - 'func_procs_err' : func_procs_err, - 'func_gb_err' : func_gb_err} - # Append to list - dict_list.append(results_dict) + for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): + # Cmd-level + cmd_fin_str = self._run_cmdline_workflow(num_gb, num_threads) + cmd_node_stats = json.loads(cmd_fin_str) + cmd_runtime_threads = int(cmd_node_stats['runtime_threads']) + cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) + + # Func-level + func_fin_str = self._run_function_workflow(num_gb, num_threads) + func_node_stats = json.loads(func_fin_str) + func_runtime_threads = int(func_node_stats['runtime_threads']) + func_runtime_gb = float(func_node_stats['runtime_memory_gb']) + + # Calc errors + cmd_threads_err = cmd_threads_threads - num_threads + cmd_gb_err = cmd_runtime_gb - num_gb + func_threads_err = func_runtime_threads - num_threads + func_gb_err = func_runtime_gb - num_gb + + # Node dictionary + results_dict = {'input_threads' : num_threads, + 'input_gb' : num_gb, + 'cmd_runtime_threads' : cmd_runtime_threads, + 'cmd_runtime_gb' : cmd_runtime_gb, + 'func_runtime_threads' : func_runtime_threads, + 'func_runtime_gb' : func_runtime_gb, + 'cmd_thread_err' : cmd_thread_err, + 'cmd_gb_err' : cmd_gb_err, + 'func_thread_err' : func_thread_err, + 'func_gb_err' : func_gb_err} + # Append to list + dict_list.append(results_dict) # Create dataframe runtime_results_df = pd.DataFrame(dict_list) @@ -193,8 +191,14 @@ def _collect_range_runtime_stats(self): # Return dataframe return runtime_results_df + def tiest_collect_range(self): + num_threads = 1 + df = self._collect_range_suntime_stats(num_threads) + + df.to_csv('/root/%d_thread_df.csv') + # Test node - def _run_cmdline_workflow(self, num_gb, num_procs): + def _run_cmdline_workflow(self, num_gb, num_threads): ''' Function to run the use_resources cmdline script in a nipype workflow and return the runtime stats recorded by the profiler @@ -237,22 +241,22 @@ def _run_cmdline_workflow(self, num_gb, num_procs): # Input node input_node = pe.Node(util.IdentityInterface(fields=['num_gb', - 'num_procs']), + 'num_threads']), name='input_node') input_node.inputs.num_gb = num_gb - input_node.inputs.num_procs = num_procs + input_node.inputs.num_threads = num_threads # Resources used node resource_node = pe.Node(UseResources(), name='resource_node') resource_node.interface.estimated_memory_gb = num_gb - resource_node.interface.num_threads = num_procs + resource_node.interface.num_threads = num_threads # Connect workflow wf.connect(input_node, 'num_gb', resource_node, 'num_gb') - wf.connect(input_node, 'num_procs', resource_node, 'num_procs') + wf.connect(input_node, 'num_threads', resource_node, 'num_threads') # Run workflow - plugin_args = {'n_procs' : num_procs, + plugin_args = {'n_procs' : num_threads, 'memory' : num_gb, 'status_callback' : log_nodes_cb} wf.run(plugin='MultiProc', plugin_args=plugin_args) @@ -267,7 +271,7 @@ def _run_cmdline_workflow(self, num_gb, num_procs): return finish_str # Test node - def _run_function_workflow(self, num_gb, num_procs): + def _run_function_workflow(self, num_gb, num_threads): ''' Function to run the use_resources() function in a nipype workflow and return the runtime stats recorded by the profiler @@ -310,26 +314,26 @@ def _run_function_workflow(self, num_gb, num_procs): # Input node input_node = pe.Node(util.IdentityInterface(fields=['num_gb', - 'num_procs']), + 'num_threads']), name='input_node') input_node.inputs.num_gb = num_gb - input_node.inputs.num_procs = num_procs + input_node.inputs.num_threads = num_threads # Resources used node - resource_node = pe.Node(util.Function(input_names=['num_procs', + resource_node = pe.Node(util.Function(input_names=['num_threads', 'num_gb'], output_names=[], function=use_resources), name='resource_node') resource_node.interface.estimated_memory_gb = num_gb - resource_node.interface.num_threads = num_procs + resource_node.interface.num_threads = num_threads # Connect workflow wf.connect(input_node, 'num_gb', resource_node, 'num_gb') - wf.connect(input_node, 'num_procs', resource_node, 'num_procs') + wf.connect(input_node, 'num_threads', resource_node, 'num_threads') # Run workflow - plugin_args = {'n_procs' : num_procs, + plugin_args = {'n_procs' : num_threads, 'memory' : num_gb, 'status_callback' : log_nodes_cb} wf.run(plugin='MultiProc', plugin_args=plugin_args) @@ -378,12 +382,12 @@ def test_cmdline_profiling(self): # Error message formatting mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) - procs_err = 'Input threads: %d is not equal to runtime threads: %d' \ + threads_err = 'Input threads: %d is not equal to runtime threads: %d' \ % (expected_runtime_threads, runtime_threads) # Assert runtime stats are what was input self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) - self.assertEqual(expected_runtime_threads, runtime_threads, msg=procs_err) + self.assertEqual(expected_runtime_threads, runtime_threads, msg=threads_err) # Test resources were used as expected @unittest.skipIf(run_profiler == False, skip_profile_msg) @@ -420,12 +424,12 @@ def test_function_profiling(self): # Error message formatting mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) - procs_err = 'Input procs: %d is not equal to runtime procs: %d' \ + threads_err = 'Input threads: %d is not equal to runtime threads: %d' \ % (expected_runtime_threads, runtime_threads) # Assert runtime stats are what was input self.assertLessEqual(runtime_gb_err, allowed_gb_err, msg=mem_err) - self.assertEqual(expected_runtime_threads, runtime_threads, msg=procs_err) + self.assertEqual(expected_runtime_threads, runtime_threads, msg=threads_err) # Command-line run-able unittest module diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index 69ce6eb819..60639398ac 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -4,10 +4,10 @@ ''' Python script to use a certain amount of RAM on disk and number of -processors +threads Usage: - use_resources -g -p + use_resources -g -p ''' # Function to occupy GB of memory @@ -42,25 +42,25 @@ if __name__ == '__main__': # Add arguments parser.add_argument('-g', '--num_gb', nargs=1, required=True, help='Number of GB RAM to use, can be float or int') - parser.add_argument('-p', '--num_procs', nargs=1, required=True, - help='Number of processors to run in parallel') + parser.add_argument('-p', '--num_threads', nargs=1, required=True, + help='Number of threads to run in parallel') # Parse args args = parser.parse_args() # Init variables num_gb = float(args.num_gb[0]) - num_procs = int(args.num_procs[0]) + num_threads = int(args.num_threads[0]) - # Build proc list - proc_list = [] - for idx in range(num_procs): - proc_list.append(Thread(target=use_gb_ram, args=(num_gb/num_procs,))) + # Build thread list + thread_list = [] + for idx in range(num_threads): + thread_list.append(Thread(target=use_gb_ram, args=(num_gb/num_threads,))) # Run multi-threaded - print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_procs) - for proc in proc_list: - proc.start() + print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_threads) + for thread in thread_list: + thread.start() - for proc in proc_list: - proc.join() + for thread in thread_list: + thread.join() From 97d33bb2a2aee281d8fbd528524837ce4740266f Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 11 Mar 2016 19:01:01 +0000 Subject: [PATCH 39/78] Updated dictionary names for results dict --- .../interfaces/tests/test_runtime_profiler.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 0e9023128f..fa8e2c9154 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -166,7 +166,7 @@ def _collect_range_runtime_stats(self, num_threads): func_runtime_gb = float(func_node_stats['runtime_memory_gb']) # Calc errors - cmd_threads_err = cmd_threads_threads - num_threads + cmd_threads_err = cmd_runtime_threads - num_threads cmd_gb_err = cmd_runtime_gb - num_gb func_threads_err = func_runtime_threads - num_threads func_gb_err = func_runtime_gb - num_gb @@ -178,9 +178,9 @@ def _collect_range_runtime_stats(self, num_threads): 'cmd_runtime_gb' : cmd_runtime_gb, 'func_runtime_threads' : func_runtime_threads, 'func_runtime_gb' : func_runtime_gb, - 'cmd_thread_err' : cmd_thread_err, + 'cmd_threads_err' : cmd_threads_err, 'cmd_gb_err' : cmd_gb_err, - 'func_thread_err' : func_thread_err, + 'func_threads_err' : func_threads_err, 'func_gb_err' : func_gb_err} # Append to list dict_list.append(results_dict) @@ -191,11 +191,11 @@ def _collect_range_runtime_stats(self, num_threads): # Return dataframe return runtime_results_df - def tiest_collect_range(self): - num_threads = 1 - df = self._collect_range_suntime_stats(num_threads) + def test_collect_range(self): + num_threads = 8 + df = self._collect_range_runtime_stats(num_threads) - df.to_csv('/root/%d_thread_df.csv') + df.to_csv('/root/%d_thread_df.csv' % num_threads) # Test node def _run_cmdline_workflow(self, num_gb, num_threads): @@ -349,7 +349,7 @@ def _run_function_workflow(self, num_gb, num_threads): # Test resources were used as expected in cmdline interface @unittest.skipIf(run_profiler == False, skip_profile_msg) - def test_cmdline_profiling(self): + def tiest_cmdline_profiling(self): ''' Test runtime profiler correctly records workflow RAM/CPUs consumption from a cmdline function @@ -391,7 +391,7 @@ def test_cmdline_profiling(self): # Test resources were used as expected @unittest.skipIf(run_profiler == False, skip_profile_msg) - def test_function_profiling(self): + def tiest_function_profiling(self): ''' Test runtime profiler correctly records workflow RAM/CPUs consumption from a python function From 36ded7b88e0ad63d45e744717a3d49e63ac78983 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 11 Mar 2016 14:49:01 -0500 Subject: [PATCH 40/78] Added recording timestamps --- .../interfaces/tests/test_runtime_profiler.py | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index fa8e2c9154..a2c1c691e8 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -154,16 +154,20 @@ def _collect_range_runtime_stats(self, num_threads): # Iterate through all combos for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): # Cmd-level - cmd_fin_str = self._run_cmdline_workflow(num_gb, num_threads) + cmd_start_str, cmd_fin_str = self._run_cmdline_workflow(num_gb, num_threads) + cmd_start_ts = json.loads(cmd_start_str)['start'] cmd_node_stats = json.loads(cmd_fin_str) cmd_runtime_threads = int(cmd_node_stats['runtime_threads']) cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) + cmd_finish_ts = cmd_node_stats['finish'] # Func-level - func_fin_str = self._run_function_workflow(num_gb, num_threads) + func_start_str, func_fin_str = self._run_function_workflow(num_gb, num_threads) + func_start_ts = json.loads(func_start_str)['start'] func_node_stats = json.loads(func_fin_str) func_runtime_threads = int(func_node_stats['runtime_threads']) func_runtime_gb = float(func_node_stats['runtime_memory_gb']) + func_finish_ts = func_node_stats['finish'] # Calc errors cmd_threads_err = cmd_runtime_threads - num_threads @@ -181,7 +185,11 @@ def _collect_range_runtime_stats(self, num_threads): 'cmd_threads_err' : cmd_threads_err, 'cmd_gb_err' : cmd_gb_err, 'func_threads_err' : func_threads_err, - 'func_gb_err' : func_gb_err} + 'func_gb_err' : func_gb_err, + 'cmd_start_ts' : cmd_start_ts, + 'cmd_finish_ts' : cmd_finish_ts, + 'func_start_ts' : func_start_ts, + 'func_finish_ts' : func_finish_ts} # Append to list dict_list.append(results_dict) @@ -262,13 +270,14 @@ def _run_cmdline_workflow(self, num_gb, num_threads): wf.run(plugin='MultiProc', plugin_args=plugin_args) # Get runtime stats from log file + start_str = open(log_file, 'r').readlines()[0].rstrip('\n') finish_str = open(log_file, 'r').readlines()[1].rstrip('\n') # Delete wf base dir shutil.rmtree(base_dir) # Return runtime stats - return finish_str + return start_str, finish_str # Test node def _run_function_workflow(self, num_gb, num_threads): @@ -339,13 +348,14 @@ def _run_function_workflow(self, num_gb, num_threads): wf.run(plugin='MultiProc', plugin_args=plugin_args) # Get runtime stats from log file + start_str = open(log_file, 'r').readlines()[0].rstrip('\n') finish_str = open(log_file, 'r').readlines()[1].rstrip('\n') # Delete wf base dir shutil.rmtree(base_dir) # Return runtime stats - return finish_str + return start_str, finish_str # Test resources were used as expected in cmdline interface @unittest.skipIf(run_profiler == False, skip_profile_msg) @@ -364,7 +374,7 @@ def tiest_cmdline_profiling(self): num_threads = self.num_threads # Run workflow and get stats - finish_str = self._run_cmdline_workflow(num_gb, num_threads) + start_str, finish_str = self._run_cmdline_workflow(num_gb, num_threads) # Get runtime stats as dictionary node_stats = json.loads(finish_str) @@ -406,7 +416,7 @@ def tiest_function_profiling(self): num_threads = self.num_threads # Run workflow and get stats - finish_str = self._run_function_workflow(num_gb, num_threads) + start_str, finish_str = self._run_function_workflow(num_gb, num_threads) # Get runtime stats as dictionary node_stats = json.loads(finish_str) From 340a7b7f816f87cfef4b56f6cd7a80bac18efbff Mon Sep 17 00:00:00 2001 From: carolFrohlich Date: Tue, 15 Mar 2016 13:17:59 -0400 Subject: [PATCH 41/78] minor bugs --- nipype/utils/draw_gantt_chart.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 72520910fd..e5c1dfd111 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -102,10 +102,10 @@ def calculate_resources(events, resource): for event in events: all_res = 0 if event['type'] == "start": - all_res =+ int(float(event[resource])) + all_res += int(float(event[resource])) current_time = event['start']; elif event['type'] == "finish": - all_res+ int(float(event[resource])) + all_res+= int(float(event[resource])) current_time = event['finish']; res[current_time] = all_res @@ -136,15 +136,18 @@ def draw_lines(start, total_duration, minute_scale, scale): return result -def draw_nodes(start, nodes, cores, scale, colors): +def draw_nodes(start, nodes, cores, minute_scale, space_between_minutes, colors): result = '' end_times = [datetime.datetime(start.year, start.month, start.day, start.hour, start.minute, start.second) for x in range(cores)] + scale = float(space_between_minutes/float(minute_scale)) + space_between_minutes = float(space_between_minutes/scale) + for node in nodes: node_start = node['start'] node_finish = node['finish'] - offset = ((node_start - start).total_seconds() / 60) * scale + 220 - scale_duration = (node['duration'] / 60) * scale + offset = ((node_start - start).total_seconds() / 60) * scale * space_between_minutes + 220 + scale_duration = (node['duration'] / 60) * scale * space_between_minutes if scale_duration < 5: scale_duration = 5 @@ -159,11 +162,15 @@ def draw_nodes(start, nodes, cores, scale, colors): node_finish.hour, node_finish.minute, node_finish.second) - #end_times[j]+= datetime.timedelta(microseconds=node_finish.microsecond) + break color = random.choice(colors) - new_node = "
"; + n_start = node['start'].strftime("%Y-%m-%d %H:%M:%S") + n_finish = node['finish'].strftime("%Y-%m-%d %H:%M:%S") + n_dur = node['duration']/60 + new_node = "
"%(left, offset, scale_duration, color, node['name'], n_dur, n_start, n_finish) result += new_node + return result def draw_thread_bar(threads,space_between_minutes, minute_scale): @@ -281,10 +288,13 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, html_string += '

Start: '+ result[0]['start'].strftime("%Y-%m-%d %H:%M:%S") +'

' html_string += '

Finish: '+ last_node['finish'].strftime("%Y-%m-%d %H:%M:%S") +'

' - html_string += '

Duration: '+ str(duration/60) +' minutes

' + html_string += '

Duration: '+ "{0:.2f}".format(duration/60) +' minutes

' html_string += '

Nodes: '+str(len(result))+'

' html_string += '

Cores: '+str(cores)+'

' + html_string += draw_lines(start, duration, minute_scale, space_between_minutes) + html_string += draw_nodes(start, result, cores, minute_scale,space_between_minutes, colors) + result = log_to_events(logfile) threads = calculate_resources(result, 'num_threads') html_string += draw_thread_bar(threads, space_between_minutes, minute_scale) From 7062ec8094f5581264101adf82e169e7bec2f7da Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 15 Mar 2016 14:32:05 -0400 Subject: [PATCH 42/78] Just passed all unittests --- nipype/interfaces/tests/test_runtime_profiler.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index a2c1c691e8..61d8eb0fad 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -63,15 +63,15 @@ def _use_gb_ram(num_gb): ''' Function to consume GB of memory ''' - + # Eat 1 GB of memory for 1 second gb_str = ' ' * int(num_gb*1024.0**3) - + # Spin CPU ctr = 0 while ctr < 50e6: ctr += 1 - + # Clear memory del ctr del gb_str @@ -199,12 +199,6 @@ def _collect_range_runtime_stats(self, num_threads): # Return dataframe return runtime_results_df - def test_collect_range(self): - num_threads = 8 - df = self._collect_range_runtime_stats(num_threads) - - df.to_csv('/root/%d_thread_df.csv' % num_threads) - # Test node def _run_cmdline_workflow(self, num_gb, num_threads): ''' @@ -359,7 +353,7 @@ def _run_function_workflow(self, num_gb, num_threads): # Test resources were used as expected in cmdline interface @unittest.skipIf(run_profiler == False, skip_profile_msg) - def tiest_cmdline_profiling(self): + def test_cmdline_profiling(self): ''' Test runtime profiler correctly records workflow RAM/CPUs consumption from a cmdline function @@ -401,7 +395,7 @@ def tiest_cmdline_profiling(self): # Test resources were used as expected @unittest.skipIf(run_profiler == False, skip_profile_msg) - def tiest_function_profiling(self): + def test_function_profiling(self): ''' Test runtime profiler correctly records workflow RAM/CPUs consumption from a python function From cf1609154d2a58d97132b9e6cb36e828fd7a010d Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 15 Mar 2016 18:09:59 -0400 Subject: [PATCH 43/78] Fixed a small bug in multiproc and added 90% of the user documentation for the resource scheduler and the runtime profiler --- doc/users/resource_sched_profiler.rst | 118 ++++++++++++++++++ .../interfaces/tests/test_runtime_profiler.py | 2 +- nipype/pipeline/plugins/multiproc.py | 4 +- 3 files changed, 121 insertions(+), 3 deletions(-) create mode 100644 doc/users/resource_sched_profiler.rst diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst new file mode 100644 index 0000000000..37b6f1e22e --- /dev/null +++ b/doc/users/resource_sched_profiler.rst @@ -0,0 +1,118 @@ +.. _resource_sched_profiler: + +============================================ +Resource Scheduling and Profiling with Nipype +============================================ +The latest version of Nipype supports system resource scheduling and profiling. +These features allows users to ensure high throughput of their data processing +while also controlling the amount of computing resources a given workflow will +use. + +Specifying Resources in the Node Interface +========================================== +Each ``Node`` instance interface has two parameters that specify its expected +thread and memory usage: ``num_threads`` and ``estimated_memory_gb``. If a +particular node is expected to use 8 threads and 2 GB of memory: + +:: + import nipype.pipeline.engine as pe + node = pe.Node() + node.interface.num_threads = 8 + node.interface.estimated_memory_gb = 2 + +If the resource parameters are never set, they default to being 1 thread and 1 +GB of RAM. + +Resource Scheduler +================== +The ``MultiProc`` workflow plugin schedules node execution based on the +resources used by the current running nodes and the total resources available to +the workflow. The plugin utilizes the plugin arguments ``n_procs`` and +``memory_gb`` to set the maximum resources a workflow can utilize. To limit a +workflow to using 4 cores and 6 GB of RAM: + +:: + args_dict = {'n_procs' : 4, 'memory_gb' : 6} + workflow.run(plugin='MultiProc', plugin_args=args_dict) + +If these values are not specifically set then the plugin will assume it can +use all of the processors and memory on the system. For example, if the machine +has 16 cores and 12 GB of RAM, the workflow will internally assume those values +for ``n_procs`` and ``memory_gb``, respectively. + +The plugin will then queue eligible nodes for execution based on their expected +usage via the ``num_threads`` and ``estimated_memory_gb`` interface parameters. +If the plugin sees that only 3 of its 4 processors and 4 GB of its 6 GB of RAM +are being used, it will attempt to execute the next available node as long as +its ``num_threads = 1`` and ``estimated_memory_gb <= 2``. If this is not the +case, it will continue to check every available node in the queue until it sees +a node that meets these conditions or it waits for a executing node to finish to +earn back the necessary resources. The priority of the queue is highest for +nodes with the most ``estimated_memory_gb`` followed by nodes with the most +expected ``num_threads``. + +Runtime Profiler and using the Callback Log +=========================================== +It is not always easy to estimate the amount of resources a particular function +or command uses. To help with this, Nipype provides some feedback about the +system resources used by every node during workflow execution via the built-in +runtime profiler. The runtime profiler is automatically enabled if the] +``psutil`` Python package is installed and found on the system. If the package +is not found, the workflow will run normally without the runtime profiler. + +The runtime profiler records the number of threads and the amount of memory (GB) +used as ``runtime_threads`` and ``runtime_memory_gb`` in the Node's +``result.runtime`` parameter. Since the node object is pickled and written to +disk in its working directory, these values are available for analysis after +node or workflow execution by parsing the pickle file contents in Python. + +Nipype also provides a logging mechanism for saving node runtime statistics to +a JSON-style log file via the ``log_nodes_cb`` logger function. This is enabled +by setting the ``status_callback`` parameter to point to this function in the +``plugin_args`` when using the ``MultiProc`` plugin. + +:: + from nipype.pipeline.plugins.callback_log import log_nodes_cb + args_dict = {'n_procs' : 4, 'memory_gb' : 6, + 'status_callback' : log_nodes_cb} + +To set the filepath for the callback log the ``'callback'`` logger must be +configured. + +:: + # Set path to log file + import logging + callback_log_path = '/home/user/run_stats.log' + logger = logging.getLogger('callback') + logger.setLevel(logging.DEBUG) + handler = logging.FileHandler(callback_log_path) + logger.addHandler(handler) + +Finally, the workflow can be ran. + +:: + workflow.run(plugin='MultiProc', plugin_args=args_dict) + +After the workflow finishes executing, the log file at +"/home/user/run_stats.log" can be parsed for the runtime statistics. Here is an +example of what the contents would look like: + +:: + {"name":"resample_node","id":"resample_node", + "start":"2016-03-11 21:43:41.682258", + "estimated_memory_gb":2,"num_threads":1} + {"name":"resample_node","id":"resample_node", + "finish":"2016-03-11 21:44:28.357519", + "estimated_memory_gb":"2","num_threads":"1", + "runtime_threads":"3","runtime_memory_gb":"1.118469238281"} + +Here it can be seen that the number of threads was underestimated while the +amount of memory needed was overestimated. The next time this workflow is run +the user can change the node interface ``num_threads`` and +``estimated_memory_gb`` parameters to reflect this for a higher pipeline +throughput. + +Visualizing Pipeline Resources +============================== +Nipype provides the ability to visualize the workflow execution based on the +runtimes and system resources each node takes. diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 61d8eb0fad..71ccbd9e0b 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -134,7 +134,7 @@ def setUp(self): self.mem_err_percent = 5 # ! Only used for benchmarking the profiler over a range of - # ! RAM usage + # ! RAM usage and number of threads # ! Requires a LOT of RAM to be tested def _collect_range_runtime_stats(self, num_threads): ''' diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 503a9d8c2a..f063e1646c 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -145,8 +145,8 @@ def __init__(self, plugin_args=None): non_daemon = plugin_args['non_daemon'] if 'n_procs' in self.plugin_args: self.processors = self.plugin_args['n_procs'] - if 'memory' in self.plugin_args: - self.memory = self.plugin_args['memory'] + if 'memory_gb' in self.plugin_args: + self.memory_gb = self.plugin_args['memory_gb'] # Instantiate different thread pools for non-daemon processes if non_daemon: # run the execution using the non-daemon pool subclass From 7c90d5ae2a8899441779c6218d869b5c4b1a2bdc Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 16 Mar 2016 17:25:12 -0400 Subject: [PATCH 44/78] Added some details about gantt chart --- doc/users/resource_sched_profiler.rst | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index 37b6f1e22e..1673441db8 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -115,4 +115,20 @@ throughput. Visualizing Pipeline Resources ============================== Nipype provides the ability to visualize the workflow execution based on the -runtimes and system resources each node takes. +runtimes and system resources each node takes. It does this using the log file +generated from the callback logger after workflow execution - as shown above. + +:: + from nipype.pipeline.plugins.callback_log import log_nodes_cb + args_dict = {'n_procs' : 4, 'memory_gb' : 6, + 'status_callback' : log_nodes_cb} + workflow.run(plugin='MultiProc', plugin_args=args_dict) + + # ...workflow finishes and writes callback log to '/home/user/run_stats.log' + + from nipype.utils.draw_gantt_chart import generate_gantt_chart + generate_gantt_chart('/home/user/run_stats.log', cores=4) + # ...creates gantt chart in '/home/user/run_stats.log.html' + +The `generate_gantt_chart`` function will create an html file that can be viewed +in a browser. \ No newline at end of file From 8fce7389b983bd297fe922d57ce7bd0ae9d45669 Mon Sep 17 00:00:00 2001 From: carolFrohlich Date: Fri, 25 Mar 2016 12:22:37 -0400 Subject: [PATCH 45/78] partial commit to gantt chart --- nipype/utils/draw_gantt_chart.py | 47 ++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index e5c1dfd111..9b1a731adc 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -100,18 +100,20 @@ def log_to_dict(logfile): def calculate_resources(events, resource): res = OrderedDict() for event in events: - all_res = 0 + all_res = 0.0 if event['type'] == "start": - all_res += int(float(event[resource])) + if resource in event and event[resource] != 'Unkown': + all_res += float(event[resource]) current_time = event['start']; elif event['type'] == "finish": - all_res+= int(float(event[resource])) + if resource in event and event[resource] != 'Unkown': + all_res+= float(event[resource]) current_time = event['finish']; - res[current_time] = all_res timestamps = [dateutil.parser.parse(ts) for ts in res.keys()] - time_series = pd.Series(res.values(), timestamps) + time_series = pd.Series(data=res.values(), index=timestamps) + #TODO: pandas is removing all data values somewhere here interp_seq = pd.date_range(time_series.index[0], time_series.index[-1], freq='S') interp_time_series = time_series.reindex(interp_seq) interp_time_series = interp_time_series.fillna(method='ffill') @@ -164,7 +166,9 @@ def draw_nodes(start, nodes, cores, minute_scale, space_between_minutes, colors) node_finish.second) break - color = random.choice(colors) + color = random.choice(colors) + if 'error' in node: + color = 'red' n_start = node['start'].strftime("%Y-%m-%d %H:%M:%S") n_finish = node['finish'].strftime("%Y-%m-%d %H:%M:%S") n_dur = node['duration']/60 @@ -173,12 +177,14 @@ def draw_nodes(start, nodes, cores, minute_scale, space_between_minutes, colors) return result -def draw_thread_bar(threads,space_between_minutes, minute_scale): +def draw_thread_bar(threads,space_between_minutes, minute_scale, color): result = "

Threads

" scale = float(space_between_minutes/float(minute_scale)) space_between_minutes = float(space_between_minutes/60.0) + for i in range(len(threads)): + #print threads[i] width = threads[i] * 10 t = (float(i*scale*minute_scale)/60.0) + 220 bar = "
" @@ -186,7 +192,7 @@ def draw_thread_bar(threads,space_between_minutes, minute_scale): return result -def draw_memory_bar(memory, space_between_minutes, minute_scale): +def draw_memory_bar(memory, space_between_minutes, minute_scale, color): result = "

Memory

" scale = float(space_between_minutes/float(minute_scale)) @@ -195,7 +201,7 @@ def draw_memory_bar(memory, space_between_minutes, minute_scale): for i in range(len(memory)): width = memory[i] * 10 t = (float(i*scale*minute_scale)/60.0) + 220 - bar = "
" + bar = "
" result += bar return result @@ -265,8 +271,8 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, .bar{ position: absolute; - background-color: #80E680; height: 1px; + opacity: 0.7; } .dot{ @@ -296,11 +302,19 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, html_string += draw_nodes(start, result, cores, minute_scale,space_between_minutes, colors) result = log_to_events(logfile) - threads = calculate_resources(result, 'num_threads') - html_string += draw_thread_bar(threads, space_between_minutes, minute_scale) - memory = calculate_resources(result, 'estimated_memory_gb') - html_string += draw_memory_bar(memory, space_between_minutes, minute_scale) + #threads_estimated = calculate_resources(result, 'num_threads') + #html_string += draw_thread_bar(threads_estimated, space_between_minutes, minute_scale, '#90BBD7') + + #threads_real = calculate_resources(result, 'runtime_threads') + #html_string += draw_thread_bar(threads_real, space_between_minutes, minute_scale, '#03969D') + + + #memory_estimated = calculate_resources(result, 'estimated_memory_gb') + #html_string += draw_memory_bar(memory_estimated, space_between_minutes, minute_scale, '#90BBD7') + + memory_real = calculate_resources(result, 'runtime_memory_gb') + html_string += draw_memory_bar(memory_real, space_between_minutes, minute_scale, '#03969D') #finish html @@ -311,4 +325,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, #save file html_file = open(logfile +'.html', 'wb') html_file.write(html_string) - html_file.close() \ No newline at end of file + html_file.close() + + +generate_gantt_chart('/home/caroline/Downloads/callback_0051472.log',8) \ No newline at end of file From 13fddc82e5947207b07c0ee25855e4a5e9c4562a Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 25 Mar 2016 18:30:44 -0400 Subject: [PATCH 46/78] Fixed up gantt chart to plot real time memory --- nipype/utils/draw_gantt_chart.py | 280 +++++++++++++++++++++++-------- 1 file changed, 213 insertions(+), 67 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 9b1a731adc..0b7bee4873 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -4,7 +4,6 @@ callback_log.log_nodes_cb() """ -# Import packages # Import packages import json from dateutil import parser @@ -41,10 +40,28 @@ def log_to_events(logfile): events.append(event) return events + def log_to_dict(logfile): + ''' + Function to extract log node dictionaries into a list of python + dictionaries and return the list as well as the final node + + Parameters + ---------- + logfile : string + path to the json-formatted log file generated from a nipype + workflow execution + + Returns + ------- + nodes_list : list + a list of python dictionaries containing the runtime info + for each nipype node + ''' + # Init variables #keep track of important vars - nodes = [] #all the parsed nodes + nodes_list = [] #all the parsed nodes unifinished_nodes = [] #all start nodes that dont have a finish yet with open(logfile, 'r') as content: @@ -59,10 +76,10 @@ def log_to_dict(logfile): node = None try: node = json.loads(l) - except Exception, e: + except Exception: pass - if not node: + if not node: continue #if it is a start node, add to unifinished nodes @@ -78,24 +95,26 @@ def log_to_dict(logfile): for s in range(len(unifinished_nodes)): aux = unifinished_nodes[s] #found the end for node start, copy over info - if aux['id'] == node['id'] and aux['name'] == node['name'] and aux['start'] < node['finish']: + if aux['id'] == node['id'] and aux['name'] == node['name'] \ + and aux['start'] < node['finish']: node['start'] = aux['start'] node['duration'] = (node['finish'] - node['start']).total_seconds() unifinished_nodes.remove(aux) - nodes.append(node) + nodes_list.append(node) break #finished parsing #assume nodes without finish didn't finish running. #set their finish to last node run - last_node = nodes[-1] + last_node = nodes_list[-1] for n in unifinished_nodes: n['finish'] = last_node['finish'] n['duration'] = (n['finish'] - n['start']).total_seconds() - nodes.append(n) + nodes_list.append(n) + + return nodes_list - return nodes, last_node def calculate_resources(events, resource): res = OrderedDict() @@ -107,76 +126,161 @@ def calculate_resources(events, resource): current_time = event['start']; elif event['type'] == "finish": if resource in event and event[resource] != 'Unkown': - all_res+= float(event[resource]) + all_res += float(event[resource]) current_time = event['finish']; res[current_time] = all_res timestamps = [dateutil.parser.parse(ts) for ts in res.keys()] time_series = pd.Series(data=res.values(), index=timestamps) #TODO: pandas is removing all data values somewhere here - interp_seq = pd.date_range(time_series.index[0], time_series.index[-1], freq='S') - interp_time_series = time_series.reindex(interp_seq) - interp_time_series = interp_time_series.fillna(method='ffill') - return interp_time_series + #interp_seq = pd.date_range(time_series.index[0], time_series.index[-1], freq='U') + #interp_time_series = time_series.reindex(interp_seq) + #interp_time_series = interp_time_series.fillna(method='ffill') + return time_series + -#total duration in seconds def draw_lines(start, total_duration, minute_scale, scale): + ''' + Function to draw the minute line markers and timestamps + + Parameters + ---------- + start : datetime.datetime obj + start time for first minute line marker + total_duration : float + total duration of the workflow execution (in seconds) + minute_scale : integer + the scale, in minutes, at which to plot line markers for the + gantt chart; for example, minute_scale=10 means there are lines + drawn at every 10 minute interval from start to finish + scale : integer + scale factor in pixel spacing between minute line markers + + Returns + ------- + result : string + the html-formatted string for producing the minutes-based + time line markers + ''' + + # Init variables result = '' next_line = 220 - next_time = start; - num_lines = int((total_duration/60) / minute_scale) +2; + next_time = start + num_lines = int((total_duration/60) / minute_scale) + 2 - for i in range(num_lines): - new_line = "
" + # Iterate through the lines and create html line markers string + for line in range(num_lines): + # Line object + new_line = "
" % next_line result += new_line - - time = "

" + str(next_time.hour) + ':' + str(next_time.minute) + "

"; + # Time digits + time = "

%02d:%02d

" % \ + (next_line-20, next_time.hour, next_time.minute) result += time - + # Increment line spacing and digits next_line += minute_scale * scale next_time += datetime.timedelta(minutes=minute_scale) + + # Return html string for time line markers return result -def draw_nodes(start, nodes, cores, minute_scale, space_between_minutes, colors): - result = '' - end_times = [datetime.datetime(start.year, start.month, start.day, start.hour, start.minute, start.second) for x in range(cores)] +def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, colors): + ''' + Function to return the html-string of the node drawings for the + gantt chart + + Parameters + ---------- + start : datetime.datetime obj + start time for first node + nodes_list : list + a list of the node dictionaries + cores : integer + the number of cores given to the workflow via the 'n_procs' + plugin arg + total_duration : float + total duration of the workflow execution (in seconds) + minute_scale : integer + the scale, in minutes, at which to plot line markers for the + gantt chart; for example, minute_scale=10 means there are lines + drawn at every 10 minute interval from start to finish + space_between_minutes : integer + scale factor in pixel spacing between minute line markers + colors : list + a list of colors to choose from when coloring the nodes in the + gantt chart + + Returns + ------- + result : string + the html-formatted string for producing the minutes-based + time line markers + ''' + # Init variables + result = '' scale = float(space_between_minutes/float(minute_scale)) space_between_minutes = float(space_between_minutes/scale) + end_times = [datetime.datetime(start.year, start.month, start.day, + start.hour, start.minute, start.second) \ + for core in range(cores)] - for node in nodes: + # For each node in the pipeline + for node in nodes_list: + # Get start and finish times node_start = node['start'] node_finish = node['finish'] - offset = ((node_start - start).total_seconds() / 60) * scale * space_between_minutes + 220 + # Calculate an offset and scale duration + offset = ((node_start - start).total_seconds() / 60) * scale * \ + space_between_minutes + 220 + # Scale duration scale_duration = (node['duration'] / 60) * scale * space_between_minutes if scale_duration < 5: scale_duration = 5 - scale_duration -= 2 + # Left left = 60 - for j in range(len(end_times)): - if end_times[j] < node_start: - left += j * 30 - end_times[j] = datetime.datetime(node_finish.year, - node_finish.month, - node_finish.day, - node_finish.hour, - node_finish.minute, - node_finish.second) - + for core in range(len(end_times)): + if end_times[core] < node_start: + left += core * 30 + end_times[core] = datetime.datetime(node_finish.year, + node_finish.month, + node_finish.day, + node_finish.hour, + node_finish.minute, + node_finish.second) break + + # Get color for node object color = random.choice(colors) if 'error' in node: color = 'red' - n_start = node['start'].strftime("%Y-%m-%d %H:%M:%S") - n_finish = node['finish'].strftime("%Y-%m-%d %H:%M:%S") - n_dur = node['duration']/60 - new_node = "
"%(left, offset, scale_duration, color, node['name'], n_dur, n_start, n_finish) + + # Setup dictionary for node html string insertion + node_dict = {'left' : left, + 'offset' : offset, + 'scale_duration' : scale_duration, + 'color' : color, + 'node_name' : node['name'], + 'node_dur' : node['duration']/60.0, + 'node_start' : node_start.strftime("%Y-%m-%d %H:%M:%S"), + 'node_finish' : node_finish.strftime("%Y-%m-%d %H:%M:%S")} + # Create new node string + new_node = "
" % \ + node_dict + + # Append to output result result += new_node + # Return html string for nodes return result + def draw_thread_bar(threads,space_between_minutes, minute_scale, color): result = "

Threads

" @@ -192,16 +296,31 @@ def draw_thread_bar(threads,space_between_minutes, minute_scale, color): return result -def draw_memory_bar(memory, space_between_minutes, minute_scale, color): - result = "

Memory

" +def draw_memory_bar(nodes_list, space_between_minutes, minute_scale, color, + mem_key='runtime_memory_gb'): + ''' + ''' + + # Init variables + # Memory header + result = "

Memory

" + # scale = float(space_between_minutes/float(minute_scale)) - space_between_minutes = float(space_between_minutes/60.0) + space_between_minutes = float(space_between_minutes/scale) - for i in range(len(memory)): - width = memory[i] * 10 - t = (float(i*scale*minute_scale)/60.0) + 220 - bar = "
" + for idx, node in enumerate(nodes_list): + try: + memory = float(node[mem_key]) + except: + memory = 0 + + height = (node['duration'] / 60) * scale * space_between_minutes + width = memory * 20 + t = (float(idx*scale*minute_scale)/60.0) + 220 + bar = "
" result += bar return result @@ -214,7 +333,33 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, Generates a gantt chart in html showing the workflow execution based on a callback log file. This script was intended to be used with the MultiprocPlugin. The following code shows how to set up the workflow in order to generate the log file: - + + Parameters + ---------- + logfile : string + filepath to the callback log file to plot the gantt chart of + cores : integer + the number of cores given to the workflow via the 'n_procs' + plugin arg + minute_scale : integer (optional); default=10 + the scale, in minutes, at which to plot line markers for the + gantt chart; for example, minute_scale=10 means there are lines + drawn at every 10 minute interval from start to finish + space_between_minutes : integer (optional); default=50 + scale factor in pixel spacing between minute line markers + colors : list (optional) + a list of colors to choose from when coloring the nodes in the + gantt chart + + + Returns + ------- + None + the function does not return any value but writes out an html + file in the same directory as the callback log path passed in + + Usage + ----- # import logging # import logging.handlers # from nipype.pipeline.plugins.callback_log import log_nodes_cb @@ -234,8 +379,8 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, # generate_gantt_chart('callback.log', 8) ''' - result, last_node = log_to_dict(logfile) - scale = space_between_minutes + nodes_list = log_to_dict(logfile) + scale = space_between_minutes #add the html header html_string = ''' @@ -289,17 +434,21 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, #create the header of the report with useful information - start = result[0]['start'] - duration = (last_node['finish'] - start).total_seconds() - - html_string += '

Start: '+ result[0]['start'].strftime("%Y-%m-%d %H:%M:%S") +'

' - html_string += '

Finish: '+ last_node['finish'].strftime("%Y-%m-%d %H:%M:%S") +'

' - html_string += '

Duration: '+ "{0:.2f}".format(duration/60) +' minutes

' - html_string += '

Nodes: '+str(len(result))+'

' - html_string += '

Cores: '+str(cores)+'

' - - html_string += draw_lines(start, duration, minute_scale, space_between_minutes) - html_string += draw_nodes(start, result, cores, minute_scale,space_between_minutes, colors) + start_node = nodes_list[0] + last_node = nodes_list[-1] + duration = (last_node['finish'] - start_node['start']).total_seconds() + + #summary strings of workflow at top + html_string += '

Start: ' + start_node['start'].strftime("%Y-%m-%d %H:%M:%S") + '

' + html_string += '

Finish: ' + last_node['finish'].strftime("%Y-%m-%d %H:%M:%S") + '

' + html_string += '

Duration: ' + "{0:.2f}".format(duration/60) + ' minutes

' + html_string += '

Nodes: ' + str(len(nodes_list))+'

' + html_string += '

Cores: ' + str(cores) + '

' + + html_string += draw_lines(start_node['start'], duration, minute_scale, + space_between_minutes) + html_string += draw_nodes(start_node['start'], nodes_list, cores, minute_scale, + space_between_minutes, colors) result = log_to_events(logfile) @@ -314,7 +463,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, #html_string += draw_memory_bar(memory_estimated, space_between_minutes, minute_scale, '#90BBD7') memory_real = calculate_resources(result, 'runtime_memory_gb') - html_string += draw_memory_bar(memory_real, space_between_minutes, minute_scale, '#03969D') + html_string += draw_memory_bar(nodes_list, space_between_minutes, minute_scale, '#03969D') #finish html @@ -326,6 +475,3 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, html_file = open(logfile +'.html', 'wb') html_file.write(html_string) html_file.close() - - -generate_gantt_chart('/home/caroline/Downloads/callback_0051472.log',8) \ No newline at end of file From 54a2c63803cbf37fd7afbe3c5d5cb12acf30372e Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 28 Mar 2016 17:54:19 -0400 Subject: [PATCH 47/78] Finished working prototype of gantt chart generator --- nipype/utils/draw_gantt_chart.py | 290 +++++++++++++++++++++---------- 1 file changed, 195 insertions(+), 95 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 0b7bee4873..c7eb876351 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -10,34 +10,73 @@ import datetime import random import pandas as pd -import dateutil from collections import OrderedDict -def log_to_events(logfile): - events = [] - with open(logfile, 'r') as content: - #read file separating each line - content = content.read() - lines = content.split('\n') +def create_event_dict(start_time, nodes_list): + ''' + Function to generate a dictionary of event (start/finish) nodes + from the nodes list - for l in lines: - event = None - try: - event = json.loads(l) - except Exception, e: - pass + Parameters + ---------- + start_time : datetime.datetime + a datetime object of the pipeline start time + nodes_list : list + a list of the node dictionaries that were run in the pipeline - if not event: continue + Returns + ------- + events : dictionary + a dictionary where the key is the timedelta from the start of + the pipeline execution to the value node it accompanies + ''' - if 'start' in event: - event['type'] = 'start' - event['time'] = event['start'] - else: - event['type'] = 'finish' - event['time'] = event['finish'] + # Import packages + import copy - events.append(event) + events = {} + for node in nodes_list: + # Format node fields + try: + estimated_threads = float(node['num_threads']) + except: + estimated_threads = 1 + try: + estimated_memory_gb = float(node['estimated_memory_gb']) + except: + estimated_memory_gb = 1.0 + try: + runtime_threads = float(node['runtime_threads']) + except: + runtime_threads = 0 + try: + runtime_memory_gb = float(node['runtime_memory_gb']) + except: + runtime_memory_gb = 0.0 + + # Init and format event-based nodes + node['estimated_threads'] = estimated_threads + node['estimated_memory_gb'] = estimated_memory_gb + node['runtime_threads'] = runtime_threads + node['runtime_memory_gb'] = runtime_memory_gb + start_node = node + finish_node = copy.deepcopy(node) + start_node['event'] = 'start' + finish_node['event'] = 'finish' + + # Get dictionary key + start_delta = (node['start'] - start_time).total_seconds() + finish_delta = (node['finish'] - start_time).total_seconds() + + # Populate dictionary + if events.has_key(start_delta) or events.has_key(finish_delta): + err_msg = 'Event logged twice or events started at exact same time!' + raise KeyError(err_msg) + events[start_delta] = start_node + events[finish_delta] = finish_node + + # Return events dictionary return events @@ -65,7 +104,6 @@ def log_to_dict(logfile): unifinished_nodes = [] #all start nodes that dont have a finish yet with open(logfile, 'r') as content: - #read file separating each line content = content.read() lines = content.split('\n') @@ -98,7 +136,8 @@ def log_to_dict(logfile): if aux['id'] == node['id'] and aux['name'] == node['name'] \ and aux['start'] < node['finish']: node['start'] = aux['start'] - node['duration'] = (node['finish'] - node['start']).total_seconds() + node['duration'] = \ + (node['finish'] - node['start']).total_seconds() unifinished_nodes.remove(aux) nodes_list.append(node) @@ -113,29 +152,54 @@ def log_to_dict(logfile): n['duration'] = (n['finish'] - n['start']).total_seconds() nodes_list.append(n) + # Return list of nodes return nodes_list -def calculate_resources(events, resource): +def calculate_resource_timeseries(events, resource): + ''' + Given as event dictionary, calculate the resources used + as a timeseries + + Parameters + ---------- + events : dictionary + a dictionary of event-based node dictionaries of the workflow + execution statistics + resource : string + the resource of interest to return the time-series of; + e.g. 'runtime_memory_gb', 'estimated_threads', etc + + Returns + ------- + time_series : pandas Series + a pandas Series object that contains timestamps as the indices + and the resource amount as values + ''' + + # Init variables res = OrderedDict() - for event in events: - all_res = 0.0 - if event['type'] == "start": + all_res = 0.0 + + # Iterate through the events + for tdelta, event in sorted(events.items()): + if event['event'] == "start": if resource in event and event[resource] != 'Unkown': all_res += float(event[resource]) current_time = event['start']; - elif event['type'] == "finish": + elif event['event'] == "finish": if resource in event and event[resource] != 'Unkown': - all_res += float(event[resource]) + all_res -= float(event[resource]) current_time = event['finish']; res[current_time] = all_res - timestamps = [dateutil.parser.parse(ts) for ts in res.keys()] - time_series = pd.Series(data=res.values(), index=timestamps) - #TODO: pandas is removing all data values somewhere here - #interp_seq = pd.date_range(time_series.index[0], time_series.index[-1], freq='U') - #interp_time_series = time_series.reindex(interp_seq) - #interp_time_series = interp_time_series.fillna(method='ffill') + # Formulate the pandas timeseries + time_series = pd.Series(data=res.values(), index=res.keys()) + # Downsample where there is only value-diff + ts_diff = time_series.diff() + time_series = time_series[ts_diff!=0] + + # Return the new time series return time_series @@ -186,7 +250,8 @@ def draw_lines(start, total_duration, minute_scale, scale): return result -def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, colors): +def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, + colors): ''' Function to return the html-string of the node drawings for the gantt chart @@ -269,8 +334,8 @@ def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, co 'node_finish' : node_finish.strftime("%Y-%m-%d %H:%M:%S")} # Create new node string new_node = "
" % \ node_dict @@ -280,49 +345,79 @@ def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, co # Return html string for nodes return result - -def draw_thread_bar(threads,space_between_minutes, minute_scale, color): - result = "

Threads

" - - scale = float(space_between_minutes/float(minute_scale)) - space_between_minutes = float(space_between_minutes/60.0) - - for i in range(len(threads)): - #print threads[i] - width = threads[i] * 10 - t = (float(i*scale*minute_scale)/60.0) + 220 - bar = "
" - result += bar - - return result - - -def draw_memory_bar(nodes_list, space_between_minutes, minute_scale, color, - mem_key='runtime_memory_gb'): +# def draw_thread_bar(threads,space_between_minutes, minute_scale, color): +# result = "

Threads

" +# +# scale = float(space_between_minutes/float(minute_scale)) +# space_between_minutes = float(space_between_minutes/60.0) +# +# for i in range(len(threads)): +# #print threads[i] +# width = threads[i] * 10 +# t = (float(i*scale*minute_scale)/60.0) + 220 +# bar = "
" +# result += bar +# +# return result + +def draw_resource_bar(start_time, finish_time, time_series, space_between_minutes, + minute_scale, color, left, resource): ''' ''' - # Init variables # Memory header - result = "

Memory

" - # + result = "

%s

" \ + % (left, resource) + # Image scaling factors scale = float(space_between_minutes/float(minute_scale)) space_between_minutes = float(space_between_minutes/scale) - for idx, node in enumerate(nodes_list): - try: - memory = float(node[mem_key]) - except: - memory = 0 - - height = (node['duration'] / 60) * scale * space_between_minutes - width = memory * 20 - t = (float(idx*scale*minute_scale)/60.0) + 220 - bar = "
" - result += bar - + # Iterate through time series + ts_len = len(time_series) + for idx, (ts_start, amount) in enumerate(time_series.iteritems()): + if idx < ts_len-1: + ts_end = time_series.index[idx+1] + else: + ts_end = finish_time + # Calculate offset from start at top + offset = ((ts_start-start_time).total_seconds() / 60.0) * scale * \ + space_between_minutes + 220 + # Scale duration + duration_mins = (ts_end-ts_start).total_seconds() / 60.0 + height = duration_mins * scale * \ + space_between_minutes + if height < 5: + height = 5 + height -= 2 + + # Bar width is proportional to resource amount + width = amount * 20 + + if resource.lower() == 'memory': + label = '%.3f GB' % amount + else: + label = '%d threads' % amount + + # Setup dictionary for bar html string insertion + bar_dict = {'color' : color, + 'height' : height, + 'width' : width, + 'offset': offset, + 'left' : left, + 'label' : label, + 'duration' : duration_mins, + 'start' : ts_start.strftime('%Y-%m-%d %H:%M:%S'), + 'finish' : ts_end.strftime('%Y-%m-%d %H:%M:%S')} + + bar_html = "
" + # Add another bar to html line + result += bar_html % bar_dict + + # Return bar-formatted html string return result @@ -379,9 +474,6 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, # generate_gantt_chart('callback.log', 8) ''' - nodes_list = log_to_dict(logfile) - scale = space_between_minutes - #add the html header html_string = ''' @@ -432,39 +524,47 @@ def generate_gantt_chart(logfile, cores, minute_scale=10,
''' + # Read in json-log to get list of node dicts + nodes_list = log_to_dict(logfile) - #create the header of the report with useful information + # Create the header of the report with useful information start_node = nodes_list[0] last_node = nodes_list[-1] duration = (last_node['finish'] - start_node['start']).total_seconds() - #summary strings of workflow at top + # Get events based dictionary of node run stats + events = create_event_dict(start_node['start'], nodes_list) + + # Summary strings of workflow at top html_string += '

Start: ' + start_node['start'].strftime("%Y-%m-%d %H:%M:%S") + '

' html_string += '

Finish: ' + last_node['finish'].strftime("%Y-%m-%d %H:%M:%S") + '

' html_string += '

Duration: ' + "{0:.2f}".format(duration/60) + ' minutes

' html_string += '

Nodes: ' + str(len(nodes_list))+'

' html_string += '

Cores: ' + str(cores) + '

' + # Draw nipype nodes Gantt chart and runtimes html_string += draw_lines(start_node['start'], duration, minute_scale, space_between_minutes) html_string += draw_nodes(start_node['start'], nodes_list, cores, minute_scale, space_between_minutes, colors) - result = log_to_events(logfile) - - #threads_estimated = calculate_resources(result, 'num_threads') - #html_string += draw_thread_bar(threads_estimated, space_between_minutes, minute_scale, '#90BBD7') - - #threads_real = calculate_resources(result, 'runtime_threads') - #html_string += draw_thread_bar(threads_real, space_between_minutes, minute_scale, '#03969D') - - - #memory_estimated = calculate_resources(result, 'estimated_memory_gb') - #html_string += draw_memory_bar(memory_estimated, space_between_minutes, minute_scale, '#90BBD7') - - memory_real = calculate_resources(result, 'runtime_memory_gb') - html_string += draw_memory_bar(nodes_list, space_between_minutes, minute_scale, '#03969D') - + # Get memory timeseries + estimated_mem_ts = calculate_resource_timeseries(events, 'estimated_memory_gb') + runtime_mem_ts = calculate_resource_timeseries(events, 'runtime_memory_gb') + # Plot gantt chart + html_string += draw_resource_bar(start_node['start'], last_node['finish'], estimated_mem_ts, + space_between_minutes, minute_scale, '#90BBD7', 1200, 'Memory') + html_string += draw_resource_bar(start_node['start'], last_node['finish'], runtime_mem_ts, + space_between_minutes, minute_scale, '#03969D', 1200, 'Memory') + + # Get threads timeseries + estimated_threads_ts = calculate_resource_timeseries(events, 'estimated_threads') + runtime_threads_ts = calculate_resource_timeseries(events, 'runtime_threads') + # Plot gantt chart + html_string += draw_resource_bar(start_node['start'], last_node['finish'], estimated_threads_ts, + space_between_minutes, minute_scale, '#90BBD7', 600, 'Threads') + html_string += draw_resource_bar(start_node['start'], last_node['finish'], runtime_threads_ts, + space_between_minutes, minute_scale, '#03969D', 600, 'Threads') #finish html html_string+= ''' @@ -472,6 +572,6 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, ''' #save file - html_file = open(logfile +'.html', 'wb') + html_file = open(logfile + '.html', 'wb') html_file.write(html_string) html_file.close() From 8ca97c861906c8b171b371144fa7595bf078af34 Mon Sep 17 00:00:00 2001 From: carolFrohlich Date: Wed, 30 Mar 2016 14:29:21 -0400 Subject: [PATCH 48/78] remove white space, add labels --- nipype/utils/draw_gantt_chart.py | 49 +++++++++++++++++--------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index c7eb876351..b55501ad66 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -236,7 +236,7 @@ def draw_lines(start, total_duration, minute_scale, scale): # Iterate through the lines and create html line markers string for line in range(num_lines): # Line object - new_line = "
" % next_line + new_line = "
" % next_line result += new_line # Time digits time = "

%02d:%02d

" % \ @@ -345,21 +345,6 @@ def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes, # Return html string for nodes return result -# def draw_thread_bar(threads,space_between_minutes, minute_scale, color): -# result = "

Threads

" -# -# scale = float(space_between_minutes/float(minute_scale)) -# space_between_minutes = float(space_between_minutes/60.0) -# -# for i in range(len(threads)): -# #print threads[i] -# width = threads[i] * 10 -# t = (float(i*scale*minute_scale)/60.0) + 220 -# bar = "
" -# result += bar -# -# return result - def draw_resource_bar(start_time, finish_time, time_series, space_between_minutes, minute_scale, color, left, resource): ''' @@ -479,7 +464,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, -
''' +
+
+ ''' + + close_header = ''' +
+
+

Estimated Resource

+

Actual Resource

+

Failed Node

+
+ ''' # Read in json-log to get list of node dicts nodes_list = log_to_dict(logfile) @@ -541,7 +543,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, html_string += '

Duration: ' + "{0:.2f}".format(duration/60) + ' minutes

' html_string += '

Nodes: ' + str(len(nodes_list))+'

' html_string += '

Cores: ' + str(cores) + '

' - + html_string += close_header # Draw nipype nodes Gantt chart and runtimes html_string += draw_lines(start_node['start'], duration, minute_scale, space_between_minutes) @@ -552,19 +554,20 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, estimated_mem_ts = calculate_resource_timeseries(events, 'estimated_memory_gb') runtime_mem_ts = calculate_resource_timeseries(events, 'runtime_memory_gb') # Plot gantt chart + resource_offset = 120 + 30*cores html_string += draw_resource_bar(start_node['start'], last_node['finish'], estimated_mem_ts, - space_between_minutes, minute_scale, '#90BBD7', 1200, 'Memory') + space_between_minutes, minute_scale, '#90BBD7', resource_offset*2+120, 'Memory') html_string += draw_resource_bar(start_node['start'], last_node['finish'], runtime_mem_ts, - space_between_minutes, minute_scale, '#03969D', 1200, 'Memory') + space_between_minutes, minute_scale, '#03969D', resource_offset*2+120, 'Memory') # Get threads timeseries estimated_threads_ts = calculate_resource_timeseries(events, 'estimated_threads') runtime_threads_ts = calculate_resource_timeseries(events, 'runtime_threads') # Plot gantt chart html_string += draw_resource_bar(start_node['start'], last_node['finish'], estimated_threads_ts, - space_between_minutes, minute_scale, '#90BBD7', 600, 'Threads') + space_between_minutes, minute_scale, '#90BBD7', resource_offset, 'Threads') html_string += draw_resource_bar(start_node['start'], last_node['finish'], runtime_threads_ts, - space_between_minutes, minute_scale, '#03969D', 600, 'Threads') + space_between_minutes, minute_scale, '#03969D', resource_offset, 'Threads') #finish html html_string+= ''' From 6ac0bc3f27c0712af921f1b7627311293f8d777f Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 30 Mar 2016 18:09:51 -0400 Subject: [PATCH 49/78] Changed thread count logic --- nipype/interfaces/base.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index ca5d30a5eb..a04c847e27 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1228,8 +1228,11 @@ def _get_num_threads(proc): # Iterate through child processes and get number of their threads try: + num_children = len(proc.children()) for child in proc.children(recursive=True): - num_threads += child.num_threads() + #num_threads += child.num_threads() + num_threads = max(num_threads, num_children, + child.num_threads(), len(child.children())) except psutil.NoSuchProcess: pass From 7a8383b649c7769bc6949e6d23a1e8dba8099208 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 1 Apr 2016 17:34:29 -0400 Subject: [PATCH 50/78] Experimented with process STATUS --- nipype/interfaces/base.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index a04c847e27..75b52d27e2 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1225,14 +1225,16 @@ def _get_num_threads(proc): # Init variables num_threads = proc.num_threads() - + alive_procs = 0 # Iterate through child processes and get number of their threads try: - num_children = len(proc.children()) + #num_children = len(proc.children()) for child in proc.children(recursive=True): - #num_threads += child.num_threads() - num_threads = max(num_threads, num_children, - child.num_threads(), len(child.children())) + if child.status() == psutil.STATUS_RUNNING: + alive_procs += 1 + num_threads += max(alive_procs, child.num_threads()) #child.num_threads() + #num_threads = max(num_threads, num_children, + # child.num_threads(), len(child.children())) except psutil.NoSuchProcess: pass From be1ec62100bfa9f15ff377c69ae2b5aa4d31aff9 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 20 Apr 2016 18:38:08 -0400 Subject: [PATCH 51/78] Added global watcher --- nipype/interfaces/base.py | 35 ++++++++++++++++++++++---------- nipype/interfaces/utility.py | 4 ++-- nipype/pipeline/plugins/base.py | 22 ++++++++++++++++++++ nipype/utils/draw_gantt_chart.py | 6 ++++-- 4 files changed, 52 insertions(+), 15 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 75b52d27e2..da678da581 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1222,19 +1222,32 @@ def _get_num_threads(proc): # Import packages import psutil + import logging # Init variables - num_threads = proc.num_threads() - alive_procs = 0 + cb_log = logging.getLogger('callback') + cb_log.propogate = False + cb_log.debug('proc pid: %d, parent pid: %d, name: %s, exe: %s, cmdline: %s, status: %s, num_threads: %d' \ + % (proc.pid, proc.ppid(), proc.name(), proc.exe(), proc.cmdline(), proc.status(), proc.num_threads())) + if proc.status() == psutil.STATUS_RUNNING: + num_threads = proc.num_threads() + else: + num_threads = 0 + child_threads = 0 # Iterate through child processes and get number of their threads try: - #num_children = len(proc.children()) for child in proc.children(recursive=True): if child.status() == psutil.STATUS_RUNNING: - alive_procs += 1 - num_threads += max(alive_procs, child.num_threads()) #child.num_threads() + # If leaf child process + if len(child.children()) == 0: + child_threads += child.num_threads() + #num_threads = max(num_threads, child.num_threads()) #child.num_threads() + cb_log.debug('child pid: %d, parent pid: %d, name: %s, exe: %s, cmdline: %s, status: %s, num_threads: %d' \ + % (proc.pid, proc.ppid(), proc.name(), proc.exe(), proc.cmdline(), proc.status(), child.num_threads())) + #num_threads = max(num_threads, num_children, # child.num_threads(), len(child.children())) + num_threads = max(child_threads, num_threads) except psutil.NoSuchProcess: pass @@ -1380,8 +1393,8 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): outfile = os.path.join(runtime.cwd, 'stdout.nipype') # Init variables for memory profiling - mem_mb = -1 - num_threads = -1 + mem_mb = 0 + num_threads = 0 interval = .5 if output == 'stream': @@ -1405,7 +1418,7 @@ def _process(drain=0): get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() _process() - #time.sleep(interval) + time.sleep(interval) _process(drain=1) # collect results, merge and return @@ -1424,7 +1437,7 @@ def _process(drain=0): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() - #time.sleep(interval) + time.sleep(interval) stdout, stderr = proc.communicate() if stdout and isinstance(stdout, bytes): try: @@ -1446,7 +1459,7 @@ def _process(drain=0): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() - #time.sleep(interval) + time.sleep(interval) ret_code = proc.wait() stderr.flush() stdout.flush() @@ -1459,7 +1472,7 @@ def _process(drain=0): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() - #time.sleep(interval) + time.sleep(interval) proc.communicate() result['stdout'] = [] result['stderr'] = [] diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 0443013a9c..c3f51ecb90 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -484,8 +484,8 @@ def _function_handle_wrapper(queue, **kwargs): args=(queue,), kwargs=args) # Init memory and threads before profiling - mem_mb = -1 - num_threads = -1 + mem_mb = 0 + num_threads = 0 # Start process and profile while it's alive proc.start() diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 8f4638db30..f251b945c7 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -223,6 +223,22 @@ def __init__(self, plugin_args=None): def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline using distributed approaches """ + # Global watcher inits + from nipype.interfaces.base import get_max_resources_used + gpid = os.getpid() + num_threads = 0 + memory_mb = 0 + # Init logger + import logging as lg + gw_log = lg.getLogger('global_watcher') + gw_log.setLevel(lg.INFO) + formatter = lg.Formatter('%(asctime)s : %(message)s') + + # Write logs to file + file_handler = lg.FileHandler('/home/dclark/work-dir/gw.log') + file_handler.setFormatter(formatter) + gw_log.addHandler(file_handler) + logger.info("Running in parallel.") self._config = config # Generate appropriate structures for worker-manager model @@ -235,6 +251,11 @@ def run(self, graph, config, updatehash=False): notrun = [] while np.any(self.proc_done == False) | \ np.any(self.proc_pending == True): + + mem_mb, num_thr = get_max_resources_used(gpid, memory_mb, num_threads) + memory_mb = max(mem_mb, memory_mb) + num_threads = max(num_thr, num_threads) + gw_log.info('Memory GB usage: %.4f, Threads usage: %d' % (memory_mb/1024.0, num_threads)) toappend = [] # trigger callbacks for any pending results while self.pending_tasks: @@ -266,6 +287,7 @@ def run(self, graph, config, updatehash=False): else: logger.debug('Not submitting') self._wait() + self._remove_node_dirs() report_nodes_not_run(notrun) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index b55501ad66..16b3a3d4bf 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -183,6 +183,8 @@ def calculate_resource_timeseries(events, resource): # Iterate through the events for tdelta, event in sorted(events.items()): + if tdelta > 70.7: + print 'hi' if event['event'] == "start": if resource in event and event[resource] != 'Unkown': all_res += float(event[resource]) @@ -520,8 +522,8 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, close_header = '''
-

Estimated Resource

-

Actual Resource

+

Estimated Resource

+

Actual Resource

Failed Node

''' From 6fe8391fed61dc4dd7209e4a83c22f5a72fbace4 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 22 Apr 2016 11:04:49 -0400 Subject: [PATCH 52/78] Debug code --- nipype/interfaces/afni/base.py | 4 ++++ nipype/interfaces/base.py | 27 ++++++++++++++++----------- nipype/interfaces/utility.py | 2 +- nipype/pipeline/plugins/base.py | 6 ++---- nipype/utils/draw_gantt_chart.py | 2 +- 5 files changed, 24 insertions(+), 17 deletions(-) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index 0f953c82e4..a0c9d4b6ad 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -147,6 +147,10 @@ def __init__(self, **inputs): else: self._output_update() + # Update num threads estimate from OMP_NUM_THREADS env var + import os + self.num_threads = int(os.getenv('OMP_NUM_THREADS', 1)) + def _output_update(self): """ i think? updates class private attribute based on instance input in fsl also updates ENVIRON variable....not valid in afni diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index da678da581..51e7de633e 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1205,7 +1205,7 @@ def _read(self, drain): # Get number of threads for process -def _get_num_threads(proc): +def _get_num_threads(proc, called_from): """Function to get the number of threads a process is using NOTE: If @@ -1227,12 +1227,16 @@ def _get_num_threads(proc): # Init variables cb_log = logging.getLogger('callback') cb_log.propogate = False + cb_log.debug('\n---------------------\nCalled from: %s' % called_from) cb_log.debug('proc pid: %d, parent pid: %d, name: %s, exe: %s, cmdline: %s, status: %s, num_threads: %d' \ % (proc.pid, proc.ppid(), proc.name(), proc.exe(), proc.cmdline(), proc.status(), proc.num_threads())) + + if proc.status() == psutil.STATUS_RUNNING: num_threads = proc.num_threads() else: num_threads = 0 + child_threads = 0 # Iterate through child processes and get number of their threads try: @@ -1241,17 +1245,18 @@ def _get_num_threads(proc): # If leaf child process if len(child.children()) == 0: child_threads += child.num_threads() - #num_threads = max(num_threads, child.num_threads()) #child.num_threads() cb_log.debug('child pid: %d, parent pid: %d, name: %s, exe: %s, cmdline: %s, status: %s, num_threads: %d' \ % (proc.pid, proc.ppid(), proc.name(), proc.exe(), proc.cmdline(), proc.status(), child.num_threads())) + cb_log.debug('child_threads: %d, num_threads: %d' % (child_threads, num_threads)) - #num_threads = max(num_threads, num_children, - # child.num_threads(), len(child.children())) num_threads = max(child_threads, num_threads) except psutil.NoSuchProcess: pass + + # Return number of threads found + cb_log.debug('RETURNING num_threads as: %d!\n---------------------------\n' % num_threads) return num_threads @@ -1307,7 +1312,7 @@ def _get_ram_mb(pid, pyfunc=False): # Get max resources used for process -def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): +def get_max_resources_used(pid, mem_mb, num_threads, called_from, pyfunc=False): """Function to get the RAM and threads usage of a process Paramters @@ -1332,7 +1337,7 @@ def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): try: mem_mb = max(mem_mb, _get_ram_mb(pid, pyfunc=pyfunc)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(pid))) + num_threads = max(num_threads, _get_num_threads(psutil.Process(pid), called_from)) except Exception as exc: iflogger.info('Could not get resources used by process. Error: %s'\ % exc) @@ -1394,7 +1399,7 @@ def run_command(runtime, output=None, timeout=0.01, redirect_x=False): # Init variables for memory profiling mem_mb = 0 - num_threads = 0 + num_threads = 1 interval = .5 if output == 'stream': @@ -1415,7 +1420,7 @@ def _process(drain=0): while proc.returncode is None: if runtime_profile: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) + get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) proc.poll() _process() time.sleep(interval) @@ -1435,7 +1440,7 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) + get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) proc.poll() time.sleep(interval) stdout, stderr = proc.communicate() @@ -1457,7 +1462,7 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) + get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) proc.poll() time.sleep(interval) ret_code = proc.wait() @@ -1470,7 +1475,7 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) + get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) proc.poll() time.sleep(interval) proc.communicate() diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index c3f51ecb90..819987d926 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -492,7 +492,7 @@ def _function_handle_wrapper(queue, **kwargs): while proc.is_alive(): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads, - pyfunc=True) + function_handle.__name__, pyfunc=True) # Get result from process queue out = queue.get() diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index f251b945c7..a78ed7ddd4 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -252,10 +252,8 @@ def run(self, graph, config, updatehash=False): while np.any(self.proc_done == False) | \ np.any(self.proc_pending == True): - mem_mb, num_thr = get_max_resources_used(gpid, memory_mb, num_threads) - memory_mb = max(mem_mb, memory_mb) - num_threads = max(num_thr, num_threads) - gw_log.info('Memory GB usage: %.4f, Threads usage: %d' % (memory_mb/1024.0, num_threads)) + #memory_mb, num_threads = get_max_resources_used(gpid, memory_mb, num_threads, ) + #gw_log.info('Memory GB usage: %.4f, Threads usage: %d' % (memory_mb/1024.0, num_threads)) toappend = [] # trigger callbacks for any pending results while self.pending_tasks: diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 16b3a3d4bf..97a2be21bb 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -183,7 +183,7 @@ def calculate_resource_timeseries(events, resource): # Iterate through the events for tdelta, event in sorted(events.items()): - if tdelta > 70.7: + if tdelta > 80: print 'hi' if event['event'] == "start": if resource in event and event[resource] != 'Unkown': From fcaec7979ed667c32941aaab1a40bcfa382580e9 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 22 Apr 2016 11:32:04 -0400 Subject: [PATCH 53/78] Cleaned up debug code --- nipype/interfaces/base.py | 45 ++++++++++++--------------------- nipype/interfaces/utility.py | 2 +- nipype/pipeline/plugins/base.py | 18 ------------- 3 files changed, 17 insertions(+), 48 deletions(-) diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index 51e7de633e..23b7a38aee 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1205,7 +1205,7 @@ def _read(self, drain): # Get number of threads for process -def _get_num_threads(proc, called_from): +def _get_num_threads(proc): """Function to get the number of threads a process is using NOTE: If @@ -1222,41 +1222,28 @@ def _get_num_threads(proc, called_from): # Import packages import psutil - import logging - - # Init variables - cb_log = logging.getLogger('callback') - cb_log.propogate = False - cb_log.debug('\n---------------------\nCalled from: %s' % called_from) - cb_log.debug('proc pid: %d, parent pid: %d, name: %s, exe: %s, cmdline: %s, status: %s, num_threads: %d' \ - % (proc.pid, proc.ppid(), proc.name(), proc.exe(), proc.cmdline(), proc.status(), proc.num_threads())) - + # If process is running if proc.status() == psutil.STATUS_RUNNING: num_threads = proc.num_threads() else: num_threads = 0 - child_threads = 0 - # Iterate through child processes and get number of their threads + # Try-block for errors try: + child_threads = 0 + # Iterate through child processes and get number of their threads for child in proc.children(recursive=True): - if child.status() == psutil.STATUS_RUNNING: - # If leaf child process - if len(child.children()) == 0: - child_threads += child.num_threads() - cb_log.debug('child pid: %d, parent pid: %d, name: %s, exe: %s, cmdline: %s, status: %s, num_threads: %d' \ - % (proc.pid, proc.ppid(), proc.name(), proc.exe(), proc.cmdline(), proc.status(), child.num_threads())) - cb_log.debug('child_threads: %d, num_threads: %d' % (child_threads, num_threads)) - - num_threads = max(child_threads, num_threads) + if child.status() == psutil.STATUS_RUNNING and len(child.children()) == 0: + child_threads += child.num_threads() + # Catch any NoSuchProcess errors except psutil.NoSuchProcess: pass - + # Number of threads is max between found active children and parent + num_threads = max(child_threads, num_threads) # Return number of threads found - cb_log.debug('RETURNING num_threads as: %d!\n---------------------------\n' % num_threads) return num_threads @@ -1312,7 +1299,7 @@ def _get_ram_mb(pid, pyfunc=False): # Get max resources used for process -def get_max_resources_used(pid, mem_mb, num_threads, called_from, pyfunc=False): +def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): """Function to get the RAM and threads usage of a process Paramters @@ -1337,7 +1324,7 @@ def get_max_resources_used(pid, mem_mb, num_threads, called_from, pyfunc=False): try: mem_mb = max(mem_mb, _get_ram_mb(pid, pyfunc=pyfunc)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(pid), called_from)) + num_threads = max(num_threads, _get_num_threads(psutil.Process(pid))) except Exception as exc: iflogger.info('Could not get resources used by process. Error: %s'\ % exc) @@ -1420,7 +1407,7 @@ def _process(drain=0): while proc.returncode is None: if runtime_profile: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) + get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() _process() time.sleep(interval) @@ -1440,7 +1427,7 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) + get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() time.sleep(interval) stdout, stderr = proc.communicate() @@ -1462,7 +1449,7 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) + get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() time.sleep(interval) ret_code = proc.wait() @@ -1475,7 +1462,7 @@ def _process(drain=0): if runtime_profile: while proc.returncode is None: mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, cmdline) + get_max_resources_used(proc.pid, mem_mb, num_threads) proc.poll() time.sleep(interval) proc.communicate() diff --git a/nipype/interfaces/utility.py b/nipype/interfaces/utility.py index 819987d926..c3f51ecb90 100644 --- a/nipype/interfaces/utility.py +++ b/nipype/interfaces/utility.py @@ -492,7 +492,7 @@ def _function_handle_wrapper(queue, **kwargs): while proc.is_alive(): mem_mb, num_threads = \ get_max_resources_used(proc.pid, mem_mb, num_threads, - function_handle.__name__, pyfunc=True) + pyfunc=True) # Get result from process queue out = queue.get() diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index a78ed7ddd4..3d3f231330 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -223,22 +223,6 @@ def __init__(self, plugin_args=None): def run(self, graph, config, updatehash=False): """Executes a pre-defined pipeline using distributed approaches """ - # Global watcher inits - from nipype.interfaces.base import get_max_resources_used - gpid = os.getpid() - num_threads = 0 - memory_mb = 0 - # Init logger - import logging as lg - gw_log = lg.getLogger('global_watcher') - gw_log.setLevel(lg.INFO) - formatter = lg.Formatter('%(asctime)s : %(message)s') - - # Write logs to file - file_handler = lg.FileHandler('/home/dclark/work-dir/gw.log') - file_handler.setFormatter(formatter) - gw_log.addHandler(file_handler) - logger.info("Running in parallel.") self._config = config # Generate appropriate structures for worker-manager model @@ -252,8 +236,6 @@ def run(self, graph, config, updatehash=False): while np.any(self.proc_done == False) | \ np.any(self.proc_pending == True): - #memory_mb, num_threads = get_max_resources_used(gpid, memory_mb, num_threads, ) - #gw_log.info('Memory GB usage: %.4f, Threads usage: %d' % (memory_mb/1024.0, num_threads)) toappend = [] # trigger callbacks for any pending results while self.pending_tasks: From de6a7f15d0605738cac790028bc8d27d790f82c3 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 22 Apr 2016 17:22:29 -0400 Subject: [PATCH 54/78] Removed print debug statement --- nipype/interfaces/afni/base.py | 1 + nipype/pipeline/plugins/multiproc.py | 2 -- nipype/utils/draw_gantt_chart.py | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index a0c9d4b6ad..a278206b32 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -148,6 +148,7 @@ def __init__(self, **inputs): self._output_update() # Update num threads estimate from OMP_NUM_THREADS env var + # Default to 1 if not set import os self.num_threads = int(os.getenv('OMP_NUM_THREADS', 1)) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index f063e1646c..eef7dacb3d 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -212,12 +212,10 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): free_memory_gb = self.memory_gb - busy_memory_gb free_processors = self.processors - busy_processors - # Check all jobs without dependency not run jobids = np.flatnonzero((self.proc_done == False) & \ (self.depidx.sum(axis=0) == 0).__array__()) - # Sort jobs ready to run first by memory and then by number of threads # The most resource consuming jobs run first jobids = sorted(jobids, diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 97a2be21bb..7a8fd259d9 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -183,8 +183,6 @@ def calculate_resource_timeseries(events, resource): # Iterate through the events for tdelta, event in sorted(events.items()): - if tdelta > 80: - print 'hi' if event['event'] == "start": if resource in event and event[resource] != 'Unkown': all_res += float(event[resource]) From e937bdcbf1b9b39e0270f648d11fbece04fbf3b8 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 22 Apr 2016 18:17:13 -0400 Subject: [PATCH 55/78] Finished documentation with gantt chart image --- doc/images/gantt_chart.png | Bin 0 -> 109205 bytes doc/users/resource_sched_profiler.rst | 33 ++++++++++++++++---------- 2 files changed, 21 insertions(+), 12 deletions(-) create mode 100644 doc/images/gantt_chart.png diff --git a/doc/images/gantt_chart.png b/doc/images/gantt_chart.png new file mode 100644 index 0000000000000000000000000000000000000000..e457aa87992fde9d2485d71ec4f89397ac05fb87 GIT binary patch literal 109205 zcmdRWGwGaq`j z-1>0Oxu>dj)!x;=6y+rlzT$ob000P5l48mL0HoUIYXS!R^A~Wo*ZR*l2xk!~RTvnU z)oq2X&#yQx;+ifh_GT{bhEApcb31!mQwC>aCsR{9XA6555Ja~C06+|o5))SS$UIwj z_ds8G7rdMrKeGW*EZ^!&lEG5k#;}z|P)9`k&`_ku;=Q-72`BvvfT~76?tF-Y6d_K& z&0d#l22IL8y$NF!m0RjL*Gsm6>b|qAIQ*SJUL6=GEh)-nneA_epdb7{1{faK-%7NfGcd2S_=k zff`I?O^Idqhs`^^NcnwfvXgAn zkW-XxUc`lq^;I_*&n9k$lj(N3aEcak;C3N|aj zMd!_C=T^g@tH7RG4#rPoA;#T6KjAb<+EYH0$M~I<(~P>NwHJKqIcs7Nda361R@VUp zP3TJz{N+bw7-r}7!HdCRnwt{toU6Vum3p<0=D5gzU2NJvvf>8mu#&&wJS}$er$C)a zKQ?OskkS9+eO)aE5Rl|LpP~+YUZ7cK9zCr{v*qt<=u1k(F;<-JW~s+~6zaiYwweLf z6#s&_-^n7{aNiUZ_*VIJ=;A8=zDN-nLnUxOh*ls@waEDPIyO}}p+VgGh>@cFOvl|>(`#L?;Y@i3uV#nO-W}4-A3m_YsF~ShK^VU z6fkw%`$NM9{)HL}5Kvpr$#!}^jEt;|vyTlzbX*ZYzq|hIzO}c(D$ZJP|6?RQ;KA|J z2b=HT_XR+o=fNBAVbl|G_4Sht#U$9jQ82H&l}=P247R#_L{KFw{acfnUs_Pjl#rS7g|m7Bz|ZNeiBxFR?y2pivepYrTKO#Yg|xxsh$} zf_G~0;1-FJ+E+x@3TZ?8zW*0TpmNBete-`%^Bk4wC`D?;Hhu}`tH)^KT)TG-Hal!m z{?ZxHUQ%*KpEc8b{ROMWmV=-Dk8C_TcGwh1r|BWFiUScKwDs2f95iuylORzP^q30# z1yIh(UUu%A=AsY|prmyS3d8U22kgicUjXFAi8uKg3hB?NqykHP=IZRMuKC@g zCRjk+aE#B{0;fR>^CWS*52t3H+f5|2Ea&=02BoAlit8!5)Q^34sx1Etg#R+t^haDp z37nX6ntpBrGw)wmTTknje*$>-81)I%Uw}lj*U?}pnJtgk7-eha{tbJ9V zj3$1oGtmb*91^D2?X0EENre{cq+T^`2U0i1@9VKpPktb(@naoAPfI7wD{r9eTEg4; zf%|ZXWHo&w>0m@7;a5OQZ3x|#I?4_`)UJchSDmE%jqenIIHyGIl&uQq9P6tn4m#>> ztSS1ZQ3VZPOVBg8N4ix71YdaamjZQ?&I2jz;+<(vd1iJoH$&lUS{Atx5WnJCt8H$A zsm=bd---dpW&H6(jV<@ea&$iW!H>bg94LVvlqgM_cIo+g#+0)F}BCb|#`N--M zGO(T8?6OopG);FQc>WuJLU*Hna$KDS+!zn-DEzTUB zkDPF@sNlr~w9%dH$nmv zldWTKPEEp$q=0~w2n+V@+EyMuaB4p-U59L7+bVt))qSM}pGi&4sRaf_Ob-9eeW0Tk z4VZ&rH+UfNYn3gbeswn+19|D)*SyZo4ljug=20KNHopuJ#w83~`!+NHx+E8I$ANV-qt zD3m~2Bn{i#aStd`!tLKd6aR92M=xn?`viT`nj7`kjs!Ib5YI=?$1*~nXn>c-NOl&D z83?ClV3A5JS){GdNC12SD_oi(e9r0dB;b(I3Cm&|2(iLI)m;hyC%d@xP8}$VmB($k zzdC#BZr1o~0zpTM3P^VA8;2Jq206UcPv)l7#?pMt8yBTDciz7Apj)p_FhT(Jbg`+K zDJ24o93|IFTLn;E#gzYkB5>HR2@Cwtj@zL2Sk)!=rR1Sl!$=x?DR|s&plpBLO#7)I_;AoSF=EImAtDpDJ`4xQg_mc4)C2@#Rx{GoMs3dN zHhg3R&!#cKFQV+HB3^+3o60&WClLUo!Z`To1o5SL0DwnL)7ep(wtbDT@37mFep!?= zN9J1kA;;A3lw=ggi38Zwb}NR}_jBg+zf5LS$XiGrTDeRgV3L5c8~~PL#5CdD=Cj*# z%|=y4-FsU>yY^A3{!H4JuYex5oe`kKt6N$C=)?^cfFlad=>H;m2F4Ci~w?YglA%VNsy_O5w>{W60nwtft_|c``?XIdoQ_?cI ze%oqh<7{ejYmusxl|};}uH!43{Vhs**X@q-FT(bRUaF=jAYTdj42H~)nAZC<>~L6q zK(EzVRM&m@-c7IjqZ9<_G2Ia54Rx2UU*WIfRXc*$gCgLd2GzZb$17w`lf@lJ zLsExn^dd5dpbRts%$8@MxfCpex+D*c9DqbgvQYV_eo2C~CF)Tjie{v?(yu2ge75XeXR&E0OgT8h}vwmo}cWxjMQKrgMYu6oC&(*&WQXf9au^I zc>7cn2@sNV$zXf0d(l5H0N+^EH>mYAAi(0kzyHM@pL!_6W$wJ(3vz#HkIok0rux1) zvK9#lydUjAAhKf>yk0>QjRpXuJMpmv`}Mgd!OT;?iVZMvY`inryeyOL%exxo*eP9~ zGM3DbkZe5|*k}NnvkrbyuYtV)A$3!BAjSk=ZI_I25{V(g2%~k?j zY7#0eqJ420=3n0%?6YmEo3GMR6?R@1c-3pxlrdOuidqZEWH!p|`C&hZa`50ffK_bn za|gEjb*u*6B_$~UK%I7;oW{#nUVJ~W!X7T4ce6fnSQa;d0j#o}aU-<3Gp9BjFcU~g zQ#PoMoS2-q+pvcgoET_89nGkX0!M;GdO9}=3LwC`+BG3>8$}UVU02q2`7{g&cxbO= zw^cu-vace1o<3VLc=0QQ{;XPbD>y3xTy;+2+-;_;6>l?nEG>$!b|6!GT-Z`_G~3I> z)|{`a`jyGly79%n2Gumt`#;S3v+wi0pAq0VMF0RB*aS=^>lm1XjfFi8e%96jZ1=Di zHq`6flbgT0i_#IKf1OyquY+Bgc975t)08n;@Ae9TUq&@8Bg_gPoZ1875-j??WAJ(N zZZ#2Xv)_iO8roz40c9JlRheoAFF1WrK@Bb+lLw_>{HQ_fY3j6cTT(%g>S0RBI8hmz^xK-hrhx7T`TNf_{)m8-Smty$5xlFT<27H! zwk^5)x4?Ug*2QUu-Lki(*SzxE-HR=uprJOiu*^0N8kY>IaHvWoMZr=|0&h20n1)EH`;Q-R~6YWM?5y zXft4nh>a!V+DcBDNcp*%c6NJb{wTAzDpMPZ8i#*R4zogMPLpERiA7)HjCv5zJMNk$aKAfth0A`V zdHV?j5fI>%r-mKZB)K)I;~N0C9h$gIUmqJCEbJQ+P+N87Jx>(QwYu^olUPi2)oa)6 zt&OQ$r77C4`3r-#Y-!gT4iXU<-Yzn^dFIW5Pf*H*voyycu%dmKAini`u}rB;?y*RE zllh*^AAxtSY)Pr?A{Jg&1NP@q6WkFBnN`Ul2r{vp;fxXu?wN&2f#Dn#T1 za`-q;i_JkPhl%4RJ! zPYSB4ZkwIWk2pc8*h#0GT=ZazH)L^_iB!o>NG^oX@w8NfCPR(a?V-Y?Dlx|kxYHaR zE6)N;Af=9HWT9CMu)h8$QQvB`KQ~w>1OycLPCefVzZs*Ww^%D+%#W%(%O9J9Dx9F6G8=5#$}f9bx;tnqqVo$^y$xTJX^1TU}YX|&7e zjHu@JY$Kz6@OQP%GT?#7rfR~q=q`G~CL_Of&!&}a+-BA?VK@rULQ6}j)*2}z&vIGU z5(bZ%oLlVe=FSN{i}`d0#s0O{a*5bGuJpJ^q3;j0*@Jq-z$h2{XCoyj67+3kP}$VO zq;%Un_7Wzu(_=H#zI)|F_6+B5U8Myyve0oG0wKq@daEbSv1T@lwuaaAdS5^Y@z)FQ zxq){nlGKAhk7B1gXZuPww^O`E-(@UbumI$v-@g>r(lGe;u5>siSMTNsoy-5K*B9|Q z{)vJeL7i05d7Kl>c}wrOX&-zjk$EHZ)-jfoa+NdA&1z0TPc9b?3XQ5R!nif9*A$KJ_>GEI0+r$o;l7@ zx8*g*vNYXD;c#6HvE+=Fy3N_R>c4(#BA$aP%WLacFMIEl+2&(V!vK@ciMId%0$tg& z^nngD+is$zm}hc`bGhCZ?KF*UV@MSrYOrNIy|f{eh*{5nksZY^iq&)!fG}1N=a%); z!uIQ=w7GmqTzjSVHh{5 zlXKbHcPG!Y)Xw4GcN#KnD_hLB54PXy^~SJXHBm9_3fIn)r=6cwfC^uSA1MCmUcGD2 z2SLFxzKQ{a6UdM4>G@Su^;L9qRIH7IpJmfHtbUY+bRk>E=tF%#A;A5D} zj4{mqD3G>?)Kbi3oVIJ?@<`JrLMOn>*N8B+LfEyTr>Oxx2e+4`OX(DdZ`eLgk0U?r zpVvT5Zu*=JXr&m_S7B-DoGX*?*=lNs_oCx6*b8{?xQG|T#^Cdrp47TPKxKBep4j-= zIm(17NT7Z1m5E^gULah+K|Ty!fqzIREi;+QDSBHAU3@RcJXV&YM+=t3K{vO>#HH%d zRb8JTo#D8->+3SVK=kbriO;@@|mG+D$gpaqB!{=l(#nS z>B>qbq4ny|)YWuYXESx9-}l#?7vJQD<>y~7F#l#M_HXL#LY^~`hdDT^y4pbw=dqWh&M%agb1P5I%Bqw{z z)ur)!hv?tYaGzRPnIPH2rAT&)ld=RP**nW5-NonqFSS>6T!`6vwUcS!Du!?w%3oU4_wBGkicTyM5n6 z4;m*utCZwtGepG_1F%T8>mhL1ev+m17KLP+ChzX_o>v* zdxm8v8v*-jZOZID_A2gNyvaB2;xu^T-#d}R*mBr>#V?0Jr;e`_l_Ma;6qiJ9wzm%4 zjHA8DTCOePGnr57Dh?>fAIJ0AU0b>sp9P*h9kj0(4N1jLb^AMsoW4)APhF!p=I=&g z9kzK*Wk3AM;*1_wSH=H(D2pw~#y(2ibz)0cOYmxTMM7C^PQRe6xg zok^sR_5Nm6z~{vobiV21@eyuEJJw}77NNF2VN18T^vmA=puqbJ#c7IywS z{ZbNpT9Fv~GpUo!UHmDD)Bm^oTzFn^=m0Q#XExL-V&iQ%VHX1H^>MAR(qrccSn)2t zH-2XNxZ1`1Pc!&7$MKFjNBP7^V*O*hR9>D;ZKkt$0>pkk3+n$K^)|@O9c$6IIV{=z!dc*feXuk#}&xc_w#*={Tp1^wrtkJyN=g(9^Xf~ z<9;x|8DYIE#=d5sj>gu(Z1JIut%y zc2-fyVlE-d@vL_xoyYpxzOfWmdQ3VhhGSUb<$j~7UHIm@mJ1dN&`9q(s+;;})4^od zbCTYU$LY^bQ)4MjSY!UgFxBfKs^Y(FhhXK6IS@r8Yj)5qx*OE$-G(OTDV`D_DGUIZ z9=M8 z&kMgmO}|wY6CJIC3}m?cX-2Yb(!tA`NL)gS{;)pgh)TAPopx;#mGjPln(2$RZLqAD z@|M<++V1W1iVPgw3RL#~?)Odn^t1$Q#_p^6;S3JH>y7xwi~f=|KI$Pf2UgN5Rnjj2 zWD}>lkRNCI-KpI%kwt^|-!p`c3ihz7Gl>TdG^_fxbq{ZW&}tay4n~1Dr!NY>(`}wV z{xX99({?IV`J}L(`(cp7?*AY{ZK!FjOU_6;-~LI9Ek@w>HdzW?h)iKxl1T4=mjpa> z2Rd`2gC6Znx2J2mx|xQZJ@j6F<)>To5~Iotrk6GfCz=UPYq&e+l~m+faZh-^>IOix zbRmEt6kK?Fj8D36`nk_V8>}sh%EghU>Eq;2b=}2tTc&beZgFk++`R5>JiP$zHJg3! zjPzf{1f?l=zypNZ>`6a0-+yA@14S11CAUMsy`7VV5*@ zEU$Z*!ntopV)(C{X4%pVnI|ZP%!dITyGOvQqT^Iu-zJJ8=x`$15LOJ`UWo`yB^!j? z{?P*X$WY2<#UJykL`;a27Ux5|Tsg<$bR>fbM8}SSg$$Ve`5XR=zm%z`W26KCumv`d zr$ouH;8Nd~H!}h5ka@Yv3<;nnHQR|~-DUSq>2AndqYF2=-U3rXocfhWwTcY@fCb#> z=$w3=CNaI5$M^OZSK-C8Cq40=OPYqzw7G)OeqRhdFug`GnqjI-yZ z(=K$M=NK$c{xql3PQRh3w%7ZZU7w!W9aLn`=S&RTa4y>y^xFNG9`V~uena}?tDa@< z1%GrBG!05LU;Op%SQD2Kzi4w77Z?Bea~R2v$?%TZZ0B_wg6iw3w$&6?O47F(h%XN# z@P=cEL=)DxFn*KNj_TPPPjAg%_Ql zID0qS{#7>lgP%eut_E6mFJ3-w;7m?QNy@2*EV`4AF47sjMmk*Zf-U-vhhG`c+%YG< zpUf_&maK-r7_V%MsG^A{6N8zuxU|T=xRfJU_u)~%N6SjauIp;KB<{c3+V9O#wc>%> zr((P?n+p@%hi!cE=fiKg8q>K{ivXX1tcK>VKVNt2h_;%Z(&jdb6x8Z*bcI_c5V(MW zg~gfhd@GCwO-1dg77{vykOoxB;hry7q^c-LEOgE2>mPtQMidxlXLF?#S`j=`HN!sgVL7T z+Di+x87m5eJe+Mq=DOam)>X+IRFvPpi-)Ei$KbZJ@KI8<>4=zV*T>v?oK>zNqvk9! z{6WXTAV|YERkRl4qYLQ6$IMt{rQy@=Z8>)`2>V<;@RIr-#d338^tin zQ8GM$n`3BSja@>x1<1)snU&AoWfpp`47X9@^Wu4EC}t^3D1KYEcMi_>UJ6O>$2+bE zA>>K@5^h}DSh+wfc6b*2-&z0w%?zZ3cNbV2ah>@)=>n~o0wk~2;2aQ_~wybp1Wp@Ae-f(%wGqqmv2r~5!>L0=Y$uYkam zIjX6wh9@WydF1cRG)25PVg}X1q@NX~6NZ#hH=m@ms&-LnB1CtnbE~HM#!D{{^heo~ zHuk;6;#eZ*ab$^b5+&76r+z1n{p{e7Q1XsI_w}O9o@=DD;rkFZ&!nxv&F+1__Yw#r zmT1qMX^0+C+kMjenfEmMMn4TLub98Jrfc@>N!%LDdhXX$w$>T;%`-2jC7m-T{re?C z@8hxPn<^F}TqcG8@Q}TLE}Svw?rI4WQ&K{$({Fly*c(YE?rAJ_^JB{1Oa>v+{xsW) zfibe}ruAjc2)?jn>&@`y!*jCnvP8*jdH;p@+qAmU>gixID=EpeliiENoPmL+xz~Qb zZ1u@m_Cr@ip}ptT8~jr5c}A+=4Vhuv!-eK^x!dx+8A&3;Stg0cWAhvY22Sx7?O4m@b~^_`pfY8ldBc*$3TMlcf*6v398PaSuaW&F z#ca3Xc%Uog&6lIo3}i65d?fsp=VhpvGu>$o#8|w&AXt?koB^eTc@>EoiodI&P&gc% zDsXrW`3TZeaOeu7^p$c7!c$it=k&DLwpzQXV;{(kf6I`vGy(aNxY6s?jr!ci!Je94I~4V;KHmO=F@phNSMHAN!0P+i?pO!2Hhf*;BP;C z3n{ds{)dNIsO5`QAuNKU+x|4e^rJy^-MET)&K||6Y^Gu^hLt-X2V9q#!2^t9~@7VbC(pwpEwBa3R4%=8?(wv6FAh z;o(V}C>sxJBj56<<2A*QD}~;e1ncg^>@GY@b`{WN7px4XF0N7^{*%7FLBB1&Lszfl z_h)o%*i-Dcm7Z<9c@2%N*@aG1A&p-w^iv~E;C}RJ;Q76Jm|3jGoI^n|h@I6sb{fI6 zof>n4;Vv?*HMXv<9SZ8V`7G~p^F$m%fdPc53fCWo9(bqDJQ?%+KF*RU0K}JICF?T> zu)3^&C}9!`slfpO&N{m4VU+Y70*L7Z=T$+Hg$XDCK%t1j&1^A8_rw?kn8W!1VoSsg zJ?8amj0RDw@$6BT@AI0^OGy|*p!KBDo4!tYkC03;xsypd1R21!#VD`=~N-?wAS z#&~7m3RZh(=|Bi3f>bI203gIyb3NZ_ITI_;{~kw*;+cnx)a<-Z5SqEYzPB`97)P28 z2>|0lGA^^C=<~jhMaQ*(#c+KzNyV5Z^PmfBXJfDgsmQ4*(EV(bm~CqH(y-N^w8} zXNH14h-M2QR!fu9{;8eBcboFtJtoBkQm7IUr_%%^nVmb36~+wavrr;Eg;n-NE<4j0 z_1+r-o-SZ-){vQEx-#}h>*N^E`9{Dz#tdji;v?RM+;=$VnEkD&*RUdz^3+#g^CuiB z08-eYPGc4g46@f5fA_j=Du<5Cb$;D?BOK=XZ!&UEJ-xT3i|ibm>;5mQW~Y|}BNoq# zxN!3Ig{57lXFpG^y@7Qy_IiUf=Nn!fg{n;k6aH6e16YHD2WS}Lfz(*ko(6iXt%+be zZ%YsxF}1W905GM4$f*4|sIhs-ix6x+NhHX^ASo6@uxh)++wP8q5cx~xw581|c9o0^ zCKRoV%1?(MQ4+LW?YNq^KAO;{_`{RL?bFXA zEVSvLlgp?ll|Et0Bp=CPHLp2&cwoK>skRvMyG#?5M-auJh`Bc-03fM~Dh*dR!;_^- znquL#?kr>Ddb}j@It)_!z3+?E)eTMlI_ALm{43(=f=7+QNH}Ql*P5K=$aE&ZkWps*s*aXy>MoAZDi-bbQ%YLS6E-fdg z)rvd`EdGbrh}UwV@!%P4zg-SOJFz$gK( z*r{n&^oi7T*l;;SSjI{q20${d+=!ii+vR0We|*y~M*;A8TP!0dlo| zi*rMzma`jfuftFvqM#+B&m`cHjrGI0_bP#kFzsed0fO+p4j^>epWn}p4pRv`=C!qRFlhlT-5Wzz|Onr=_J}`oilY@0|8K`PsRxr zk81BH35WpOP#ck1WN>#^PGyM%{A2N z^cVRziJU94Y}qWoOOO6e{yttoHDs=#smnr5+T!pE1H`|hv9>yOWh5pi1rKJHfrnf{ z+r z@&|wLt6e1hk)@fT1pu-VBw4`LIR zRA7%YSg4_9j41?|V7~+Xm^0*XJ^t7i&6wT4fU$b`T_gwmVdz)*Q{L{Ko5Xc0q>%`v zU!9~pTzs+;m-PKNd{f`R^ohCs1F#@d3^luJY?sz_^aT>$?njWqN*QCvTJJ~EDg-?2 zH6#u~Zb$j8D*!_D2y)Vsl4r~OUYGG%6Hg_ybKZ5(3R%D~`19=F)g=Mu5MTy9V-EOO zm)^emL}UrmNq#+1%rrvaVb+xMyU{)7M+ZJZ>Ix1nh$UXCQhRy7rU zZx^bH$@EP@b;|&|P`JZdhDjAK)4D&IzmY9f-{=t?syhbZHa1Tq&lEWK&!1?J0``)b z-(JW^6}p@rGBC7u?asJbja38yikXz2XVZ2kGLc)5i%RX+@j3!Upfobt)wX7Mq1>dC z+9VQwxmb2MxQHQna#A=Zayt8*Kesx6nM8I zPC@Qiw){%R=K`bxqw_6~oojae&!%pyGZ(BNpLJ5gkDfh~=pZA4Q!}>=>zr1v^)YTE zlwRmpftuAZhmIbvb;251dQ4C2>lFTnTZOh`R$pGZAN^IbT3MzTWxlL9u9 zU;Cd3lArLp3=9l+d1Cw@7H30e5-+fEDMvZIm zLh%NdH&46GQ$&fm@5tEh7y0(XW;?vy-< zWtnmX*dPjd&&t~`vc+o@hV&mHSudRQ_32aYjI)0@!qU^j4qD~lw+(LD77%3 zY0?F@qx)G*Sf}%*s>B@v0DwyPYsWc0cE(O4;5+@=jw`77{CB-l*XB=$<-M&t)l82{ zqioR(GfP=`Zc_Z{NsR{Mhh>xWcSybt0n66{B-x9!$|m+W3Czx?0wTzv5*SrB^9SN8Qkucf_2TX4+KuE+Ualem0e3%@$ z5ZzB6?*R_oxz8Ahid3zWA_QMmSMRoPBz!kc6gEHH{Ehnr7wOZCy6^)a8GAhR0$ME@f z5#msp#otesB;LSj)WghU(!OoGd}scOQY7Lcjd%Hm3}MEV4r*+e$|V0AFVI;1Z@NoLsY{C~t6bj=UD1f&((l z+H%s|+N!=yn24YX3o3I_U5A%!?k6t0jkmKkv;@t((~$$5Q#6g($nvT!f!!CFz+LLJ zMT>bl>YNE9S1$-YCPn3}nbLR}>dd%*aLTkl#<(Dio?x$v!9HnLar|zBJTSG2zIt6{ z`_Dxg2^fIOz5K7#$^S8*!tg-9o2_7mU()?L{F2;@LR(zbILURL%z1DZr*EI?F=Xi6j$_%Vk3n7( z;J%dedfc!OmeCP1zFW#1<8|BnOSHh(Q?Tpm*sxpvdmC=Y*&E28TZHL?tM05*;hqi8 zSnY@99an?B|6P~9aI}t~*TuOj0V{`{4_D*e^aGJ^Q!VjD3=&cIT*LDnXE)8x%Lbcn zCkQ>2<+iz1E4pIo?2o;Xi__#vPiS36_C z(BpO!@&-v{Nd}ou&Xhp5orRl>M>>UY;YP?$RR1SKLzog|&$9gc;~mv7y!*K4S+yUD zvPSQ41^;vDtrUc@Ab(!_>2fNi>(ZQQD_j@12r}TuncrL%gy3>L6d-`uQf2)2;lXC| zw)KP_+v#!5Vs=*I`Yxw;AK09sHJASdywtKal5K^B>i$|18d1f;&zO#yuJtS|?^Ejs z_~Q}#9OjoWAd>0e+`fHPX>8WRl?<6^6T6PUr6hHqUj5#2Yx__Z*QIRA{2_x5v*}8E ztT@xgH@B@lKba+4ihv|*GA~42=ZhC3xasG=g%`$7@Dhl$jr>D`na6N!Saz>VfmPLk zH~L+^Hjv9+9;;H{hm!2IJFem3J&%@oz3|}lyYBXr@!BcGp-X-rZFZpl>qj3dkFTFQ z7Wu%T&c>E9AJA~-QY3-gnBTVMj8RgB4?v39Au!jqb?odb2rh(#EuXhEX8CM7EHOU` zN0=Crg^i3#kCvCOKusc@Uu@E001lrTpcnl*kE(_bI&V+F_NQ@J?eMV6h6~)Yq*xkj|OGM^(w~tkI7?#f}vxw>C2Q6(h{rVP}wVhu9v<;g;HQJums#5RxT4| zt|e#PS6pZ`2(Yo|OYS`_hQZHap9P$CWNi2`8Yx(A^s?{EeMA9WB(9Dog>geRQ~6ay zQ4gjzIESD>!hegT<@yMoc17iDa;-vmDza-jeE|n-*d3N$*I(~aH23y?nVqw6Z~3N9}{2Z&;YNXoyUAFpg7sm8H zHuHDKg(XP&i$GY`9fwBtTKj@Zd7-AI=Bu2em$Ao5Fek|WC2Y!o+cE;g?l?-YJ3Q!; zSvL~$ZT}}Ih!n;%e1(aPvo)k~vm1#VbkFLRqg*yUMhyTco6QHdZ=ByQPv-Ep83&8g z?eJ|&l_@DXHAw<`pd9{z1ka$)xxc(q(#z`S8$=O~z2gUa5F-_!N3G-j97|xdf}zsl z!Waf^WRP_981@mHFyJ{^S`sYa5Ue^KeOqg+?x(%)?A2N8f(qN>&VpzA*J_{)q@?i| zq*KBBzq8S+?hlhNIq79Z18gL}#xs3DYi0cXO!oObtitY{ANxIM-p$o-+wHM-*6c|g z)?ZWVLsq8@G9k?P;h|8|l0GR&p~nf^bC~#W)WlEZcepR?cBL(qo}%)6yA9;?_;@6T zlBXy8d_G6r=!h!FZH$=}d6TmTac+^Vjm2_=Y1jnT%|uKB{^i_4rDzF#dtE0DMQZBq zLjUlgkjRqYT>b&OP=FwHG<1WRlLN@W2k$ldPbY1`!^07&h=&-4))7DC5p z{sIVqaD@18D$Gs~H2 zGrrmVA{5A4Y@hQ;9R0e3mGL#Ip^DZYG|5kO(u0hHWv*TdUq{)e>}s@MhbdecsSqCD zW*xhq?X0<)m2xnoj#Q#tp%*5g?YOdkQro1Of}S)#DBohP`CVDj_dQTVz2ur*9fN4S zsaU{rB~$7xx?{@8ZMT|*mw|FsXEYM-A?pHWLUJN*o^aEf7)Vj-4N`5;E}IR1dtXfS zq`*MnC}}F|n%M#ToS_GDq!3Y;JG=ZnW0;Ur@7AqKkg?Q#3;ieae^CDRpWe(I1R6d3(QjtBIdJnMPC|5 zY-aJsDO(Lpflpe=pAb_q_Q;{UsJW%Gt-5JKJi{no)YZuU?zny>rTHNAi{)fgyS*f{ zB*qNCQxrq1r&$6WMrn6fNqasZI_pSqq3E7vr4Kf8}o}djLhdzn76+)K_348 zS6j3hf()sfAM{Vw3_(6Yy=>6>CsdMz{NJK2N!nh~hFd=6?zU@wXR5YgLl%o89^GS8 zjuDr3%Wt8#GO-Iv{+9hi)Rmx+D__)L zQ_^a`H+MSug$5SbV|TdO5WI_y)>N(gn|knFOWtbxn zs<=}LdI0Ty|0U6UW~%NA0$9j2)?{loF7JH`R5OVZ(H-HW*C^Fd2BySb=|l9 zzHe@BW8r-nUKRg0+`#CakG>+&pq}>as3geY7V$Hh@0A8R0Tx%|6Vd5)uAe?>S^XmU zY+e@m=Mo)MV#WKr;jyGxv$djpn4rb`v>U%#dhvG+-9e}j%BqLjmVv(^k{Ba}JcyOP zPODwlBQ!H8LtSK4F$7swiV-mWKKl_0hwrsOKXse&eNXI-(^B4MheWlP+d?I>sEsd8 zoktT)>K9#1=GsZvpUBjCrfe5FM$2;JF>DtO@Y%C8?Ae_fmf6Kf5afG8J1XVrw6hnV z3s^On4fp$S`R_1J*F2x2G)Wdm9|?H9Uf!4sK5c0lN(Y|N zi{{9R_4~UqZPW1!2Ax9a=EGGYCpKhuojSq0<5TtbI28H{n31-t#k#9$AE!6Y51v`t zSQ%Abndv7E)c>so7^~22i;7PyQ8>Y(XgUCG)Lv6BIp02xbOs+sR?NfXiasAgN)@4^ z!XR7s&40vpz97a{@JxU4v&-0s^a3R?V730~1Tg-+m4`vrrd=k_=o+#6OKu=CN=cF> z*Y^dc6A!*+E5;j0KXTF;#nw5+aQM<6U|VZqM%~N@0tFs2w>@NYY;DL_f7h%Gc~L$ zCn2B1xB-_tBJ2EH7pLSkhpo@?@|_N;)?kW9Z9{>9ldLvhP3XC`Ku1ZQL_vn%?!$$i zs@WaU%4HuJ6B3e7^In~}_GaRbYug=LZ znbqYsdTDcIb6W*fI^SgZTw^>t^Q~*!_w&yiJ}2}92A-j#evC5Gl`_D#LVJgWr+j(& zJ=4}t{qFX* zXF^)*!&^)wmR~VarG zNScIeSVMqBcmyNqz>{){Wc7zR>rKBr8X=4E1z(@j>d=GUOB~}F&E(}$H8^^bB*}NP zSPot7jJ&1G%pto)2YOK$iu{+YaPc%AvWG@LY+9N3Y(%NqJuOWUnEr}JbvQY(``8Gv zdmtx)cY9h!hl{>xX!`#m@2!I3YNCHp;t<>&g1fsXxCM82C%DUm1lQmW!QB~baCd?Z z?(Q(ypm&n*f2!`&eZ1#Xoqp=6y{CJ1_u9Se*Q?pjd_1)DHKM6E_-A;DJm&P1F7z`Bz^&p^M6z;5jkn_OgFL+d#O1Lu>9#u-~w zk&B0r+djT1I#mz5wE!kDjIufHW1JkHwWWd9j4q4up$I%#mp$gnDkdy%_x8uDGSIS@ z9J8VD^ycE@<%q+v!*a()1&!`Za4JKko_3jezON-bq2*oTgjPwB!-kzLTS{T&)X1uyz zBK0NHupe{f&)~Q8KbW~MP~{5BAxYVH8F~|dVxTq~P8UiQ+)vd=GM(rwsoYZDD zaOlPyxWCzxN0;F;W~mZs+&Ij)L+&PXn;Jn*)pjxlZYv2TUR<@4=WNk*Z~xAFQ;Kg2 zK9w4Ic&{VTMstk-4jy@myhUACtfeU}g*Sgp$K6#9dRVMaj5j%bEWXbyoOWabF=@_C zs|v9k{Okd;IiH@Dl%}SyFd0&hBhf@{hECON3VvAlM=BeiZ_Pp;<(F6(f{*iC1B`MG zAY0`N>kBEecH)jleMn^0^FZ?9xSn$~Sw<)NLEAM7RxqVn0L=aN|Gpa zQcOV;A`7RL@_lVPhD4-$duu2%_08~whKpignS$akfCcG(ANQ>Fr6spU3S}6HPlzV^ z!IpK%u1h>IusocvXx7kwMBjxtVK{d*Jrke%dh`QF>DprTX%tt-!{zwy%i{)dS&_N* zuYkWpgIhgYhDy1+dGMT`WTtiax)lU0K#e~lbCTLwt7ts=IC*J@t+OFKuJh7xFqS#< z^I3OpQTIxRQqr}5&*#>*r{9EDcl67Iv)8_4zxdj6T5gObk5&%Pl&pr~gbP+5>~$=Z zcU+CLFlQt;`uzr@GMUaCky}mL>b4d6CD@BWGC)OnWSha!tA@$JVCB zzZuf>5_i@v4PP$g4f}qS@hO61Ggkt0=uLOZbu5qXSUN6At9U!^7k?(F$ZGmGTn1Qj za|YQmP>A^vqF9YaXp9bxfz2&77y{9P0@EcX{-lR3x4fIC#`VJ}E2|OA8_s$2^Mit? z=0rYQ2Lm}K2U(zFWVx((D2ptfoQ&}<*4TZf?&@kS>+O^tS1fI8NMri~ty%I!cDkl= zf;pQ<=`2zNlZK+pw1c#%AwBLmEm_R!ixc?YM2;7!LZ!m*73{q30i-J~xej-RB2Cw! znd-%i;o*~PUPGwHWCixHkB7^Zoq_lLQN*K#PQj_`*L7IF2J?o*|5&b z9UC2G#nTc5+pyVUe3Ru#@htoK=49y>k299HtgK@%M}U4y40PK8G_7A*ZZg09GMqKv zR*^H74tC5pe%}7&(Nv*ZibSno*vCHy@;b^&UgafPI!`%Qz(wVFAe<0(wY>_`^k`pv z=)eE^`r%Y(A^GugFt4$gLQLQOYuI;j&&<6028R{)`+=AC2ZcO^jX*k&m(x``$t?5G zV46TQS+l54L?o8tnIDd&Z)C1GJKA2(1D^YznU-k4eVZW-+^&t1;I;;z!fCD+H#wg( zgbg5aQ~`yLhv4Mm8g8 zxxHCCHE1eu*GUW_K5!siyqu}5J#ywqLWmKBuFO54thwxsF*r2a&p>WPd?BSGX&D85 zkji#QVaek94M6A?40u#$2&6P#yiQhRsiAnuC^StWI{UgK3ffqPg<;{~Is?0v?iGT6 ztD)z80er%ep(UEgw%bw__54!CgXelSny-wp@oNG%R8>-`OiLyXTf>waURDq*65~rA zPwKTzmU(aWz!BmALYWHTy!fXs+cAyw=-_2$!^oXkeo$CU;OhYRNj!P#x-TQLVbYzr zvS`6Me`QpJvc$I5;2tWqR#cbO466w*S}1+SoEckbg4Q2WGM%53`2t+v?0&zzb|Xsn zdtX8z-?Z`Xc4I~HIEC!^Jm8)=-h2~y2gNCgC?uLfEjIUyAuBy*QuW^^GMm-uuzPZT ziHs96z2CB~Dxo9~_{^GMH2XeYrr>Wq;iFF8Lgz@$FP;=5HSaPRrUB6aBRw^1Cds_$ z_d=`Ek<$zI7Q|7WZO_&D+>7NJOqi(6Qcq5)=ri7`dBXYX< zZ|0${^e=FF8NUF3f353h&x`y2t-+i{NSCPpYWaxUr2i_?xPvg%Fs%GyhP^I~B#SMP zL?shP3FQoDpL=9C(4g95bl$M-M zHMWrVHm{0~NxOK0-C6}2&34A3wneM$`efM_yKZ$>iJ5Nb|5S5Oa67@NO~cXwft&{X z2yMGb!&c>}K4F4&(%Jm!1ukDYldB{tS76%hfAv1Z?$c$(zG7u zjbf*Y zm0%<})`&g7k`5y8#}HNU%F!??c#et@dK}$NDh`iEVx{8od$Q&EI!6k)X%upxVGKAn96reTt4jz@`jozH1YQhxfodQD_6aFk!d zrF<&`0$wHF&d>EX6?#qrIhe$i67`-E?;q)=?_gTrY9r5?Fn8YOJrrypyfwz-Eu!@U zZ|z|u9e{ne$e{S7o5=pyTN+pO;@j71ICJ{2($=B4d^r;#k~$3ac7NV@&TjX<`N-PK zZ@q9ia=LtbE&^^>=3sh=*!75^&00}>-fH|%C-QQAw#>bh%6mR?)c#VmW@+)fX9HO^ ze7f2@(GD=sfI_%Q6Grct2?`ewg7o9z`%Ok)v z~G^IRWhdX2_oR~un-sTl zjc~D2_FYXAQ8*kQqWCqB2EzSS>2W!>NxxH?$+vB<8a{7im9(C9_EUV57(kP_0-kBz z9wt%rsG&WEEWUrFGHaEn?3?UD?*pD(ga}e+v76NU8)zpEgD08ll9nS;)jtF~HfTp^ z_!j0IgxIb<(j*TpGS~&uZ*&>)CKQ<1(Zu6A4D*Dx1Sr=u7O>FHrq8}Vy;Am6yU953 z^+Bq7ya)SmCG?9GrY5TqXq6*=_)8rhj5;jG{^1rGwqe^8@wFtHm;2Vn)0>BvlhW== zk=3>8Z;uT42Idx*8h&S8RhoDT?m#8Bse9TgG&*Mg`DDvb)0^FeShXM#1+mL{oh@ zl`i}Y6scf|uO{C-dKHm%GBpZ%x8f)>O+!@G{lDTI3DtsVC_6ZCqH4=)TD3AAxrnd& z5pn|kQ^w@I9_rOKS6sEfb@MoqOJfDICGJ|{{Uphz`J^Jc^I0<@rcrGtCA`pbX@AN7 zNz7O#Co*?15JMD?+oj(O|X`6A`03J80XY0?W+!0)bx03fEeq;iBgy#`qgH~(n zRubw%J3M2a&(IJOFK$RFL4-z8qlFq4VObR@*YWmr?Gd6r`zT1Te<@DxlwBr5i<_#z zJXjd2d^V``Qf)gA=Gh;fa3)Yk+RK0i;Rs`{Gp&pedeHnX5)mU%JDiLClAn|Y350HT z;gTT^91*hN^rw1Pu1CQ|h74F@iS9kW)6si=u2AG0Y8!UQg3mWepOv9OUVH?=RU0~H zdam^_*r`o8z2&A4B%bTO;8*MMuKDR&SNWVA2Y^D{cD);wsefs1;RpjC*q^4JPr~n5>_5KOv{=Sm~bpE}9Qn5Z64L}LjF z&52Fw0eKLgAj1e{v+t_(1heTOYa@T!RI4NJ4jy$E%H3ZmBOwKhtr!=%SECyd! z!KB>!X8#{lwyA$QH{3e;_emIP5?20+`oEP=`o5z_q)L38^OIA)<*IZkLO#xwGCzIa z{e1|B8xGUbV>egZ`}3ksR(@WTkER`r6q=Xf(+5j_SR^30|KVi+ynQ&8OB8Rje7T)s zd48@ca{q>tS6D9TsnCsyxvuLNj#xN>9%II_N3T%Rw)7 z-Tf|}D5hB?Db`JPlxN1tZf(h-t)*Q#>#{j}b{9t4_UEPNYk)WOw_!Y&&`*48dlJs^ zwu|v2W;#NDmy=)=()4t5%B~zgCf!DZi{>gr?bJLcd%t^9RHE%9^}R(c!|8E97Ov-W zo)-fLsWlj89cJcx^Cbd!6NNo4f9xa^&$kPWj1v1iJ4|S50&0Z88oX{@-37*p20V@J zQ$sPh{=)v@c!*wl-;x>D33E1w>;yvQgfJ8=J9;hM?uEfRVP?&wDu6w+)ql! z+37=VzmK6oeQOsq`_A2zl5B)#OCsN`r^6rG;xo4$CtvAt0(rQx?zmll?LLtQcsWp- z)hxuvR-C{woJF2SRyN$420Tv(XhXp)!W1v8L<3HTx3j;BA-fZq4fRhaD~gX$MbJX@ zliy;R9L@|*nz}dH)=J&BG(Z}Agt@_*GsEc+_gmIX_3jy}ZQ<^@UwVUu{!4)_bq?gO z*n<0Xl3|YxCuz=Dp#ccuv8v-agOLpQt}%Za%vK;qo7J24_DW zmkjWb(lJG$wzZ!uIIf_cfv)FtFxNJp25gt$^L|+!O)J_9sa&5J`nrD$%VB7_;8^o* zxd`E~uc}%A`#$k}1LdiO-e3`f(Yvij6TvHf4t@$b_hM01s&Qs*mivC*?9n#-5~8Rb$HJ&69ZEcD>V+dmReXoXvrx9lbb z8$PYcu?SnIr%E7@B}m@$-v=)*;x|LxvsinTwd3sFGx7HA!ii48|6JB)$me|?1W}C8 zx{)p}aCB?kRQlB{m43V(kXZUolY{j&wOh*YKAW^U0%tU>!_1v}0Ij0=G7Us8q8C-N z$=7laRY5#@%mO%QC_@}NuM`1X=bTg>r>STwY5xApvL~KLiNsx(JBE7NvF2v^(Oexq z`8kh-9ngDqYkJ=|r};&>$`(Q~uO6twiNdEz(*SD3ZCcdf1l2re1Gl^G=JUue^T_nQ;jsg8?VHC8+pv0iWU7Q1>N0zc_Fd-)Ph8>`~Aw* z$%bE|@KlqhW43els9)PI3K@2>(q)V zV|9vZMnn;t8P82Ea{z7bD?Zmg0X^r`u>*@o=`;D5M*?_ZVO6T#nD}2qgZ!>GQKdA@ z_mtj?m-9$@6EkIYLIsG)1_t`S0W8)s&v9iP*JG(P1Sobs2NP?6JLr)^7O*!?Jh3E~ zHm;lYtR2H#yStZ4TLr6ct^Fd3DP$!Ys;O7|yW1EIPZUogE>xWNejKW{;Uy~O6ozJ; zq50PagDJb?o<8fR^2~9)ay}v>;>G=2;1Ruh`Q7_hj)BUe{LpPF`ds) zZolaW4(m4^v#-<=)#VmgLR*@{j~+=tv-VswKux|*#R&Yz;@@3+gw8wP(7aI6bhk;( zb(|0P`yrFb1@vgA?;32kL#LjR)) zfG@D*y7zfSz`Z%hiwE!=C))3_&LZ+K;B7v>GNNes`U|?BUj=AY2jsLhM=ZtP_L@Q| z3@qRRITp2R+S2NY-5UGgVZXRaFq8>+I)rl7fJyI1I=D$}&iEQU1`1(`RI79w+vRvn zHO!s7qNf@>)@rga2T+ffAMf2gx@HX@`!6xxc=)e6|6M{jUPV0FwF776{3eIrJD#@L zB6hUpvw}xH3=Lby-LIJj>F7uE*Y2jjc;F)`4~c-&N6uEW6T#8AFODzRLe(7EbiCP zGSJ@t^DgF;cYbj%JYF?Z8Xq{at7*AEXtfov^~PADkNyG=m^5BoR!{cyZ*ckPs?dMi zF-(=>UVZ#;WBVR~@c->a`TN&T`Y_Y~Z~5Hv`T0L)cMW)ifqG%(;PwABI`!`VkB070 zAH`!aV!scN;$U~P!iM!gq%rOo3td!5|@NqfDkT8#en&gZt{uCb%sI_^Y4 zy&JaE6>cFNgnHbVL_O1kiqM z5Z66XNQDc=VeaX(uwp4~V|E>Z}WE=5GL_@S^)lsm&dJ3fLP&6E- z`EHd|5|>Jcd8XLNbH|70luNQ6UWe-<^}>JopS6wItbZnD!37+^>~heW7%#2-F&D;<|LzE|%iaaS&>*man>T$A z$k+1B?=#1mnPwm7A7|RI_B=}6LxEcOK3f`QC1VTS&pSNdlCper-d%QuWDF1Ef~4+p zbscnc{tTO42|l#Zd^mb}3aB#B=yZ4*emoPdb$7ZRwS0N@y&8G&2e`yiHJ!u?A(0{F zxoeE-m(p>$ShgaSHSDO#XIJ_^j&(e51Ztn&2ryJ;@?ZPW{QE}WC{L~~9Ms}4+4D>m z>Xxt3(F2TK&`AO)X(W%)L!tEA2jolA62LfcG%jD;Bbzl;pVPVPprbTrJmpS?V9 zawPf=MDM-aL~uu6lHNobgT`Zq^+@CrMmNtmvZMQp;9C?QxTcc0&@U}S~z_8#)a zhr@7zi84J0R1Sg!I1{u=DEB{2KK*I(0L_$bQJy>&xRlm%sSXQ!NbSY5yc`q}W^f@^ zwr;ubj1Nv{0Y#m_RughQe9!&O*cQQ5UOVdX)Iu+hH7EA5L3NSY{r4{a2ym?7w||cL z-MBrGDH-j}&ExrP*g50lXA`4&LL}N+gLQdM*)4LaKr&}N6>$wF`0j`PsuIrk;~K5^ zH!Ox+6mn1BOwjYdkNb^~$`$BtI*SnGfm9GLm*1cYo}Xsp zCywtIyS}|ANGFths`Y590@YOj56)Np9zdIgrT69qt^2k|r*1)}UC+Tn=>+7#EpnA9 z$tCp;-o}1X%NiQW2HStim^3Ou6~;cMhE}eQ)kRfndGqY9ZHmvQ&17`K%S)?MSMD*_ zNxICL_RbCu2P+;TavEz5oQl^5#Jt#2Ze6>WV|lgo{8R3(dtD7UD_0|xx>{#y*~uP% z-3@SX%s54Rq@s9TMhwN7%B9#LEk40*UphQ`xi&wR)Q(u{u~$&iFwlsAE5o9%Zlar> z%7NZp$Rme^i<^FatG`U#%(l(;_AGT=ZCsS?1G>2Xm&O)8g=Zq$)0RtevI4u5o{dPEr{wIhHmPC|CUP>wCaI z+SVzIX$$s1|14-Yxa{CMW;f)24;;}&|2Iph|HX6v2gyu`{wuhY{a!0pJfuFX#37R@ z+qb)0jJmO#vEN1T;=Q6)?bu4r2mg9jgfU>D6kZxCbc6?~HsxSr_?(2SN4g%EBVSg` zZ{YUZiqrgw-rw+l5pId_IBxWZ`!wOH4nw%EXO zv_wepA@ex$obMBaaqu)&Q5NxELW;!U_JUzL#NKBalVY#4BgINWM}A~G@_mT6 zB!*@&rcY%29DbP{yX2nkc$q^G9b2Mx=%^f$z%^HU<0=83)2xCueROH3WW%~ zPdvWV)?B$=IKVkw`5Bh8C1BuZJU028uF7?{b{4Tv!=hxu()<;5@H7N95d;0~+)9fY zX#}m?CQ74Bf`v5@jSn5+zO^ew9$(U&RaagT*`whLvs(E5N*26jgYK107FnQlSJ!H6 zHRbf}xG|QSq*nj}DlHr}Qm`ui<CtZO(UAV4_%LV54LZ@N_`KN_YE)U=;rh+IP%=8wooEqIY%M->$q54*(oAINrkg5f zPHyHnbFRCgHl8e014o)4cXxnZI9X^IR^Hb&$UTwW8Fr*}2vr{S+m&nnUf~2MRNun_ z$bJlItLfWvKmz}8pj_8AiF-i4U36l@~8u>a}605s6%+rd6{8AAg7j*%fo5`q z7P7eitsvDQF}~Xa1v?dza%vsNn4OtxFJZXymrrr!v>!J-e}2h?m%^+SML;hPqQd7- zBBSJ9ToEA1V-%y?d8aC=oTPygbA@z=9SlCala{88{PfF8enVZdtj8ED_jF@Y=?4d2 z1`6oan{cDoe2^{r{qll{WPO1#`K_KYkRibLL4ld$tZ>S4;G*ZlhkB06rf*p*9%hh! zrVpt2$8+x_TBga7sGJM|dts-i{1ujKq1T#cpp~pg(i@K*0RPZ8j zm4Y?)ZZ-KCXy`O4ZCMkto4TY2uKcpyWc(GAeJ|JQH?p}!RCiHm5orrY-!!bne+C! z6^GWE)4;f7TjRXvSXONKoXL&xpGRxU(~1?`WJD_16FSu<2WHb=UQ)2{{VU_7g_WIG z(k{hnbYYUTa)m)ybBp)=V>wp`=hU?GB_rRTwfgH>d!|7g+g#B;?tAS z`lkz7vD_X-Th`Y3@lAV^)i~Il1aD?4Wzt;xO)C4_wJbpZSlX3RUV zUBI*Y00o`3Zm<`L%+!OJHa#-u?=Ow%d{!$A@4Edw3_!kxQ8|IKSy^34g<5l0BWF#w zr@9(U#e=LyL`-n`ABG8tqPzL}T&?n7*28|2hdi`|fO?`cN-sS?T_u|W?YHYS?$`AN zJS&i(rQM``Z*CI&oS^ACj+53>tJC3$WgoB`1d-GEtlIA;F}wNpbYirzB$tNUL-r%` zvHxk1P*(298MGVs6!b7&Hj>altZ46K%WlI`f=0PYn=n_U2lRYyJwU_-@2kI$7}B=m z(PdhILU-beW@oXFCeG5|{K}u!MJ<;t@UX}nr4FZ*bD}SHm2?xtx&cW<%@yy#leNwj zvt2$rc4A`uJbB;|e3)to&iXA1Z~QJAx=snFZsC-r+{XcP&dfA`lm-0UQFeD<=>_se z8T6I$>KkM&=Hr^?J}QV*^LI29mV0+GX(|w(Mu+7ZK{z>ygr;c95Gc5>`|&09IhlAC zo$GT-MhcWlL%Tlrdb9>{X@^_vW;dj9i+LiKX*>jjP)`Krm=;}z86wUZ-@#42t8&}k z1}9nzwPl$O1naG=`2CEeaFET~T_NM)qgbPVlw!1PV^APO z@6^*w?94C@$SEsF(%I;KgHo1`SNo=H*oE_YyUGDh3zAduk^J8@g*p+LYu`U-vx`%C zZK~x=!dR&|+|(L+yg-l{5A&<*-EJ<~_g^}Yx!zY~Pz7S! zaZ7!Tdj7oR*fV+a_&o%u2Ih6d3`W?qBh{go5PZr!`6)UZc>v_*KFm$X3TOOCHax`i z8Bgq@o~mzJco~syvO!2{yVUlhTc`-!LD(z=smEfp<#IO_|SXDxUF#|KWOMlevJ`Q_<`4JU~YnI&{PNX5myB| z?(9TZB;!+{50MH~b)ds_7KIrjYmaXZ_1S%=I6RI!{K3%}IJd~CQ|DgqVr12stpraN zF2Dc5duu{cot@&RbKckrx~P@tRfDUv4s*xyO&2wx<+wA>jP}Z#l{UZHy5FNWHIF86 z*_9j*fMTrcYaS9C66Fja;^8n6EUzdmUy%jkR`|O-_DM)MUi1J}W|xt?#lnKs-m_TO zrrZc4Q3JxOl+ z9%l2DsN&=ofuMqlCF@xMb*RhktgAhLwe?e>R+z%NSB2l@aro$=uliDFJ90XH1!zRE z*R!}vK!v}#y~WxRTnId(2%>iYD5`yg3i;8br3MuCZ&94;ng8x78W|fIZCoFsfYX~v z@HESR5_f3?Z=Lb4CTOap#PZ~kYnnvZ*pw?P8rn)t7)eLh2??FF?G)_ns0%SIMH8=5 zN`f-@6BqULvhdH4gSutwJIW>!#d3+Wv_v&p1X_e}@`{9LN61gCiNj}@bd|x^!xwJp z4GQeNVr?IM$J3|~5u1$U20|$%AD_nb#keH2`9eI8e{jgRWdGs)WOFl-cN5)llzuWy_? z!8G2m$kn=P0@S&tr`tyvR?FUC6{yX4PY{MkFBdQ!x1$uH=HPCz10w0Ufkovk6d#Tjv8hFXQ|yme0U*+Xz%!NW0IgyY?Uf(65evaHo?zeX?H)%B-sj54OG{Z@os zC)3KdYVhlfGaVU6O47hOY1T#slb&j`M(f9<4hNERs4{+CEu%&H%Fw&E@}aDBTXvn= zlZ>UFh1!oF$AU*6A2X)N1y}~UV?9L^)Sz4(yXD1fL+EFYGa0d&_TV%|g%F9a@W!E^ z-ipQgH!89Ej9CJsPxnDWp6Rm~34r@{E=PiNr-H0ku9!5Q&?}nKGCNVyqYqNky`NC8|Ra<_0ZGJVY z$VmR?g7jG-O%_Q-2V_`Afl0FCuYX8hE3OFIxtdwg-D=BH(cKMZCClV~j@r4tiNi-! zI2z8+7%JDSiR{nPRG+X~eDKXR8LgllVWAtGb_ukp8##hS;ezmWf?;}fu9CK)u#Z1; z9tXUd^6CP8mN8>Ij%I?+EKKQy6-=}!Xe-5=Z)5|kGpk!$klp0{aX<9VcL!=O|9ldi z3xlT(yN!n4F0>qCggcj(EV{faVNn31AKkepe;Z&G*06L*n)y08{DUv=cX@xn`5R2! z|1rcr=^GtT{(TAV{{_BN7<)mNsOji<5!9g4ZbvcgHQx-($4!(AOl%E$Gl~a>wr$uu zaNY%r4dK{2?;o<13=adN-MVw4{BOhFs>d;C{HcRZa}`Vv=$Wvyo4}*uCyGzKaZ=xH z0@|6C=P6hH=~zkISEtgq%Sx&1EUTmG=kHoNmA>Pm9CAu7|Gi)P<34g-i^i2C>WV0o zT6Czb14+y-v9!f}w?^T-xpzzVN31bli3tK(FwwV5mH0{bSosmMP zWBL@3PB?0+x;07e!t`dAh(=_t0u7EC>FMIG=jqLmeh->x3N(0>9C(3Qg6V74=PeV2 zueLW)(!qr-Aw!8_0ZBpbLL55{ZWv?Xq3M;b#U*gV7tyLQD$i>-SiG-KMBby(Jq~3S ztdJ~Ynud+MvM2^5U^+D3X!6d&dyvm7%d{Nx{XO)Hz+^O6V{)fFTi=Mbnn$nz?=!-?l=ZL=8sE50uQS zfoeu+FDWGN1xc+%zRi=tmV=)pbbF_&X}N7d?XwYNJ$U5T3cU6OZ{{Z1JfmwxTdIzV ztkDl?*5JK^bNqtXw?((=PfS@V*J2Ns9X%zZQ9PAf58A@? za)Yi)uNlisF%rrEmnkuJ$aZaKn6dshb+=fv^(1_JsV9l;jQ@=eTeDk zW1Zjm;5}2wW>~(<@19tfYma_k(p-EtDQmq7#g-Qu5_Eff4cL43ucAp-PMmz=n3A^c z;tp&=OjosN%y>s@^tzp8cy?(i_=(bLqD#3d(E%}{`DPKw4_*}tLgOz@VSQNgEmBx< z@pj;Ika!Tf^;GsmJQ2!=OVrMH<8g;Uy071GEBs{sjTk>`1Kf}MX3{mVqxNj9tx=RN zt*h-69;~hwNC`{%GdWnx$kAVn3Je&_sMv~+77-7(26AT+f}T#47lR9o)3#yP@?3ry zc3MtBMR>1IOv8UOSv6E|K=Qq*1JIi(zhMm`cp z8McLG>x=>Xf)jsIWE?7F$fZ!jAHSI8Ky+|Ttq7__VKS6i-9c=dgnP6_)c2xs z-HsUanEs_GmQm8hir$e0_704Z{Rrl={AjE=C8R-3!(ug% zuB@b$xzgIfun+`Bwy2=+Xe~18n_d_-k(BYyLQy^#D6`jgdsckD>(inmeaL2+ot zzj8?J7AF9^v{xg=FAj4_por<$2mx`qZ2B{pUuFM*4W|~FD;`;bGVAK6A~glu_};RS z>F!E~3dr!2_mQXJZ-u1F1i5x~t40=ih&fsk>|)lVJYBif%r zv3RrWN_y44?ymL;d>rs)7^a87ll>$fo&?r*q=x31;`G6b$ICfeu&YP#%25;8{K5+Q zqbAJZM1K-REwp@*@Acj?h&wUohG@hxLW$G$CU4mPfYWICW=xKk19<0L8=&w%r%22knC8Q z{ha$?O~G=x2m5QE_`AD5v{XaPwF>ZLl$C#aHu48bveXK=YUSxyc!@^LwBxc};o%N; zXDHNUX`@tOp$^yu1&}mf<7KitO!1E+;%ziLui8>AD#E4SP_)UfKlbRtJCIL#x_FH% z#EY!*P#&YBcJR|m!p3vp0GR-_I%Ygh-{fwd$+|<|bf3@cn-x|NKmxOVt`?p!OUU16 zZ5$%|oj$mk67L|38xG&aQ0tdhhHeblzd~`R1(7C*J(1>`$tK{1^-JI4Nc=&Q<2D`o z%!1}yOG|~2_Q=T&)7~$opk;(`pZL^rIKPAvc#wRj{JQ>_SHnT?LJu8K6E+}B1J^XD zoH&Y2l|7|FxCkJgXGP{85pH759sp_@xhy1SZ%v~mEipxVWMEl|6u2W@W|X^EA3uDF zrqZR~;1m}*ea!Zo7~aU){x;_vc}1FY0!qZ!G_JvFavY;~Th-Hu>kbONhWkZ8%rMGf zHe+qS2O=kpNf@a9I$o#1AnQc0VDod$NHo0@N~7?mg}|nQxC$Mh(cb%sEXb5N(aKd? zc!rL;8O%Wm zCpwwRGa#i-BvxZ#-5{-7fo2l@(6)zy`L5RqX-lnuH*#A%=7q|^!vO<9`AxT+86KFtFXFjfS{YVcu$A#_6$^~y%a_kn-AOWcGs1Vs8OoL6qYyLDmOVu&;5lMPR6U+*V=drq{DBcSck6zakixeicLO`VqE6L8 zBm&nd^+UfEE{-Pv5|hRwL9jh}nh(NYPl{Y6 zS&=qW7JZ@do$T^9JVwFBr@CpI3V%pth9kaF_$p|@Mi!{>OUZ|xVHu@k=+|1Lm)d1r zfRGItG{$uwJ`A@bCc|DF%n}9l?hp}1c;2w6X1D@#2>lP=t^&kD{8UnGm zP(Frb{|!}+fDS>S$LOfxqYbMm#7=WQSvu&c7Mr*^l^2-J?$YpcDVqJMCk?#2sGbb! zrU_S|oRfpcch02CAJtkiIB@F}N>=oBz6dh*6E@$~U|%&R5=cxLl`QZV>e93<9aSJw zbOVOy!10;cAbyOR@Tf-$3>g$GFc3=UwB2?N$5p)%TrK3|ah;1 zhr1GW7L2^@GB-Eb4?cw$^?dkLn)>&B;X%s@a{cKw7WHf#RojhE>W3>{r?EDiU&L`Q znv45}6!gdWKK7^kdG6dcOGP@Ad+oc)@D2P3OQ2;tJWIS8_R55ub~ul7xlCwd!tuTt ziJwWTv7niTq0kDU_qe&umBZH32_p~ywJUcmnFOysx}@IkAXjPZXaR+ z>yPZ`rOpruAnL5vN7s9ZJ}k48>I~6K`;tT0q~3ZH{F&OI$azU>ry{>8SsZ+kv$2t9 zYo1ErTOC#WCW6DlFeh;Cku9OdcD`KnTUbIEnJTCMKs2PV35g-cpi1;7An)r>PnD7& z_fnzLxWObKJPWk?q!6DS&lKTI&RD2I_Rl`zoqjw1&mMz=llfmri~lVsnwRXhhv>Sa z)-!<@pDfjHEYlB2AC(cl!^u?Rh9zjtGKSG7HN~0sQI++wH1|4ZjP4?esRC=pfhn-}D zxHE_#WVCPDW#U#4`Lo`C(b3*_|3E|k_x$P1MQ6;##z&^R*7nuQg#3NG z)!@xuwl8RyGaV_?^#uN*0@Le2*y^&O{%kfiy8878hh=*K_Zf@yB?v67D&f^%*6nAo4 zVBk02#zil!`i%()yQ=y~scM1#_LoF`>00hq@TbsSuSnN>rdLTRGyYStuF6u}u-qOM z|5_=|3Kv($G_rL%c|{^shfmdB%pDGe;?GhBJa`}TAq(vOmg_dw_E@RD4D zd0mf9Lvw|3!Lwvb@&QpEicDR%7NuKg-@)Ev>f~o~ZacoInLM2b0?$Hn?(Y95!KiN`(MMMC#CwoR$a6*dm_IEXBdD&f4sh9xSoN$Zr7iT$-PXi>0FZ!3!bVOm zGvI6=pN>Oy7&qR!q4+M>YWV7#O8&| z$vs74ulwFB&k~J`BkVeThP8|Zpu_YWaobSBcxLImHG79hMta=XgQi)n_QbekTF>t6_9MazY%QmIB4`>q zlP@DPvJot3glMeY95l)DJ~yA(JnNx}`*NObQ!QX?{N}_Lgh-s78+t?XA{uwc21g?< zAK3slytL0r(=*DqxdJ?z{+m7%{||d_{T9_1_KgCHphyS;DvbgH0!j)9BM5?|G}0=a zO2Ys{3P_2RID`m8m!ty>-Q6)TbPh3ePMq!cdEbBGT-Q0*Gr!I3*=w(TuXV?|<8$NU zG$FWt)eT0M8nilv=^&FT-b)me&tVxc1_b+hb10DAM5vO zUN;&<{e*n=FWh)(82I8MkuVy9*KQpG-Ll*yYso4Dl7n;h$d=|7rzP;~@vzX$DXEj? z;VHr;*{tWu$#Jb>;a_Jif%=El=(cw%k-~oV-<~3|L`0KJO50PW0ZI0nJjSl7$by9z zZy!Hnl4rbAIY1pap@44*OHvhf9ktxUY!+Aqyapyiu@jIWi}Uo~DZ{~B!cw;MXZJ>B z>RSa1HmJzPY#w?|pRG+x;GiJw+UDu~@g+9aIAn3e3*31W-N!K|Zo+dTiLguz1(6|f zvXS?Nz@xc_ksH^D%;bP4U5Gt1yw1g$q`hbm@-0te=6BuF#%^)|CX5W1ni7n(MCB$5 zI76#U3q9xToe*z$MBamH{mkq~ymsl#t>eORKa(!d$A-MV^}=GpIz`2ucbx!n^MvJs zjFI7be99*F@9(DbWYb-!?t0r~vqzmUcR)AQy3tOtrBn{wfM+xcH&=1}_2;%s3a3eG zdGQzIOM3p55}*9*ieVRCWio#!5r&j@&?3%2j~o#6-KPryC1acI9B>W~MK)tp9cCkL|=hXQxGTW<&o4 z02-tg;%-|4z9Sa}ReQ&6%wENn(B!kXBH@Z~FJyERehK&ZM`R zF1Ck(T&19>*7=)arQybd=QpC$Z)wQxUXA3vkmOmbFgE!0s=3p{)AJOgkzuxNrZ5au zf$l<3nW%_L6(Z5%NR>6tPk)ztHGF$3u^jIHhlGS@qFwXy06VuYF8L(WDUrnPU$9{vFX>xFXuK&k~*VYAm z%NT!(Nk)M9mF-qcTZJ4P#Os&-mLhP7Yta6H$AJ+f2M+X2r{m-BmYGZA8;avQ*5)!x z$B3|il`)M4+Xmg#w4w*Px}2Gi+{76l5;IFhkTgZjkzs6JM zedMT&_rXTx^>tCG7uuM8H{y*ipEw9IH2VgnZg_LGrWWx05uxFDdRpf!{e&-m3YjS- z^#(6@ow7Rf)r`o_u4Bn%^Wx&6r?nNYm0x$b3#qk)3@%jlFItB67`t1swY;- zO`{)p9!=%edhHcJ#PcGc6<3~KUY?GgsE)WG1C@8lNxeDXnqx^xP!hFtTya5twWn+4 zijbXW46RdXt>eQsT}%ZeJj^U;FTVW_#)uaPFto_G=WJ)B&KD{9`~-5tmQ zyGmBprL4NfYT=2?3-uQdXV-HDN~F_tRl0ERZH)yBqUV=t{3--eOiu9@^ne|EbG;+( z#fGXvjyOHik-gDov=}4cYLRaso|;4Jy=?GfE?6wtW3B|ATfNuO3DY$m+~@5i6L*2z zjB!`ii7$~Ep3x>FXj2v-ll7Gr^`w1EJ(I?#$J5C@9m6%=M-TKKZm_Mq!DR7vxIP*y z&i%qEkCik1FkvIFZ(*?8F?$wxZ8EoFnPh0&L~) zONQ5KJJjzwbQ~~77%YWy)i#vEpm$zmzkvjhHKvb1`$+vBb+S=AFW%x#s6(s@3qP}KLK7v zFO@6+J=<2lnu8UaXkUJy62G22=7d0LE*uR4ME+u3*F+bm6d2n2(q7Pt+9TM%d`>e!TD-n0Y=YzBzUyITmq#njksQx$yt#|NcBk1 z`6!G?%H|;&&sHd&+6}q8w7hEX#DCIzt~pu5V#RyM*r8-<<-{cNc`oCt5{8+OTiybI z7209t1U4=$|EIBVGyPMTMyC=mF%c-ae8(w_p|Xw*Jy#)NY%-58W`?b@q)-=o2+>fy zaR0YhG>rd)74rFij^O|Q{A%)J^fybwSLB^)28h1ChP$w@7?+0X^fq$Rxpy_@(^zax zqMRZ?jAG_PYQcU_z1*JFQyEiT&kvM)S0cD&am7+t;@z{b6Y-=sRVNBA207VfT=5$B zbO8c3_i_*xTSrN^W*rVUM(T}ipeYFHs9D!^XB0EM4VlajvIxJsdEXX1mTqwGctgNN zdU^!G;PQ|3=!d|abBXAM67Z?Q6~z`3c6GL?;7i#FL6RM3fIU7dg|d|-?hrz&tsv8w z>AAF~lU=ehpY2BFbof%~wc3VVi>x1!x&W3X5x>ArkHoSXZ%=jMH7`Q}fAks;F7;1y z*`z?RQ;%p-5YE>kPJ=PgD9TC*`5h%;Jb*xlLw3`#u^_N~unMYGqh1taFy%Kr?+UMx z2VGYwa*_r^wg~4vWo&boCqe;)xyX@T6O=@&x@0=k2vfIlS=+cVWNu{Pydg+y#k-v4*dur^3XGfcw+W=NXJuU`~RJMCC})6mtvs zL8Q^;K|+mFJlQ#tqdDM+g~mHg83E`kXM(jxLl|_02P-w{LSHcXct2ezplTd*-YU2v-w?v1L~Q)(kz*M{CrHN-VdS07xg2%(N*TmX*y{nUTejvtl!_{W65L#cy#g{{7qI`(MR z%lL&8>11u!m+-R7`T4bv9A%U1R<7Na&a30_U&2hv zxQnojJqiWR%sZl>$8;cCA9EB*Bir?i9!CvWQiq%jQ`}bnywPH&peA%8U8C0f{n5iV zL$w~#a)#(Kqn9h3geN(PqrKWA7)28LP@xO&)AQQSUJ)%OeL6m{=gZYXUs7LB&xPZ? zMqOd164Usjk38~b)d||0y4)+E-Cp%D+;A-6Izz43 z`O1`t{ct-(6R$BO2c@{V9Ez=cJKD4Bi0FVu2Ygp+>4S}OkbViy8=AbqwH(60@_ZJ@ z{)n(z(V;86{Ofx}L{;r^7=l@OJgbmx%6R9jlKEK=D9Wky>1FHOoX^7d2vvxmnl!N|RLf-2EzV#j5L5-gi zxUfa-&h)^)6!x6&E5!q2!hxy5jih8RLn;(9h*n~)Roqgds;2nMyZq1ax^draD;@c@ zk*i-&S_Ar>HrCga!W7FW3=%ow>*CTJtWsHlHi3W^Ep&TtFdEC^yK1=VPeym2*FDTM zUHaB+ja5y-MZ|?tk~PLnW0F=kmYuAF1S1@R+EjV;C(77b9jE1k(!DsljmwY+Xwh~j_=osWsd6! z9imLw2i2x$p)Q(^P4E+JCCo@HU+)ol`r{IXM6MeCnrx|4OA!I*@jW8$2dDS05jhkM z^w`0;8|;e)jQrZkFKn`OWvkDUQOU7>8)&i#Kd@}h%Tr{5n|WGXT?PBZZ=L!IZHUp- zLAFwcZyi!X0&S1;|oumacIu+!mo{ROdngJ{BvlJnzeFwDW!P{305H zE(b{n0=^1F?pnTk!tw4HE(PtF@U@W#z|<((F!9l@!ePQQE)nxZ_ixDieSBi#Me#;Ea`WYzBx|S zmy>cdN|*Rpw1P8ELuo-v(jNQhyiXWzyeL3M3MtN#xvX1$+9aGMNP2WgZw~?py0spQ zdut0VBIK6KCi};=4Q@hD=0BD_P|HlQ!JKt!l7U^SZ|ySe(C891e_0XHV*-$(-gTcG zN^i_3sCvFGF)X~DSMGvx^hpF8fP4-OC za5LseH?wSqG~P|~vwi~{1O{a>j8YS)#TwS+FNp$AoXfQ@yUO^vVk@4&ehP4Ry#M&0 zykX$Kch-9PrICHswdxiVq?_?)-$xNG9<+2U^i8lZcc_*#$q zPjD{qM>;#Vj0+NM^^>$$J>Dii9Q@!=k;|>BDRbGn0KqS{X);5ZQxCSOtGSLMGX{1s zmx}`ujUEtVrD+By*ui4|5qA{72HWYC=a!|RJ1LQc={fZ)O~mDb0IdFdNAQ|o@x^zs z9oll&ZV<&l!Jk)`Kt1w`7QeM{SpD&pLd_Kar;d8xSbvxCGR68-4Kc~tQWr|zr_Wx1 zWJNdhIH?OBMR{T->#pgSn8K+8mJ5wm^%OtAzdKMd9Zz4pKj~*9JNiib1pb+y*Xc_4 ztfky~V}swEaQF~{-@5vuAs9f^ht<89j?=)+XAuHc_j@4t?K8_tk8Hh?CvJ4y7dFoF zFWPi0iHHnEg03euQ@r!OEh=?5Bwv%_m|0&u6@#5?G)ncbe;@vB{^&sF@E~gQ9Sb#X zpwFBTk`0URegnf;EJ@&(1y0P;AKYvt7*vnit{M~EE9)*%`o<7cHDSuXc`{i7Yx&mr zP%eJUc?Pe6So>PORK1ZIaKU7L-U9fjih|kCk(m%oMNuv4q|{+M`G7L-HCQjf4nV3a z766fR7q0bvS9d_&v48kJPRvdFqou~Sdz6l>YL}6((S5og< z(E0#t8k3os0gd$|*Ip#EG5^}o1=Kh(848eV=&RzW91Q8k8?G<-< z5dGHibxK4mvd8B$7nQW1mzr3s&1Lr@#s@&D?g5xVrkZ2ZWOa!xuFU6d=$&{r76d3K zu%bxpYUfXOS9q-2l!_{g3A#>H;?n>Za+m(yZZ@cB&1f9aVXl=C+1P#45HxMc(p;ha zn}q93J=2PC%~5wXGtAnChYft_Pzl@0r%%}GhjK5_eTRG(P>E4qy>PktxlKba$q6`A zz27y8a*93m^*5FN>h!)S>5FDc<7sMkn^5RLJUF{DJdf=h(x0BY5w%leo-=8jSi5K=f3hMPgBV>KcV+gX@i}Ca!^jXS4nr? zafGsJmc;7BMS;=y{?XSgMr&&xhJ|y-#=Ftfq!aHBw$Ds?|1iH<UHr3*NWSiwn zE0_n8n44V>Ll)J14&ByxyIfCU#yy~Ei}OjpYxIw2Ef+*!in@j!l|q-5E3^tPh$brZ zvUJ3}rVuAC3%_~31TUrFkPS}|yN*z#r^hrmwMK8R2X|gLC+6|+c&5r~E53ByI=F4& z^hTzsAM+FP9AT#DhDRa~1FXzyOiNI!_Dbw9g{|&NhP5sTHUBWj;z`wLthj7$NRt*3 z2MK8ZwZmHW+LlvfdG>Trr#${${%}U!ij8N#xjb@dPc|8)^Dpdzptyl2e zZcvd;*U9AZCmI}9$8O6wL^E=>P8^Z8 zO7+PXme1*fMRZ`;hy)IR&E%h*; zWCTULDA>?5shMH`teD893k-HbRh8|)J1OZc8D?j<5JbAy!IqKnAT{!m(PgI2#LjtP z+^nqm;esT#pRNnLskHEDbdPY@G*4eW*>U8+T}@(6BD8~;^|$D2J7bf=@1!aZ*sC^e z?eWXsGK{Tq8J&-DJ-qLNRilyZdHZgeH~1^V|IG!+g~651FcT%XpBFBM^hVPK49#B; z+CiqgGTINg2~TL0R~1oZWieuHlt}r?1bmiDV)}6UoM8R~-5QqtI&@sh+?kS6xTgAR zT)u5z-KpudE0+=|Bw)XJER(R}a+x)=7(B^GDE@{ct3`d9#UCtf9V``a>1(N!{N*#w+b=<9VG_3i|eOMOSG7GW`0;*@KJ7weL#F#;!G4(3glos5q}<|FwaRovr1=qLYh1*5@9S&Nw)L`pAhr=x zFp(b2+*uo)($Jv#z|~3ZlBc$Ys?8D-dNzMB?;u^r04kjD%z>+lc?leG@-HsKk-e$w zY@>kdq@m(gY7wdUT|j_Ls95cv7yLAjG#VKl^ecFkvsWOcxaC&O;?YHI-cYO(e)rzy ztVf%k&+IK}-;?0pd4=wxv9=Zw^Ai;zB_}=MYZX^iMZRf| zZ#eUtfW{b&j1;(*^XI(OqI^_Mcfz5<9w2_)IyP0E-2kqq#}R!iWUI++9=F(^wrM!L z$S+a(tzPG~CDsGt)P8JT`qy*gC)Re6GryRn#^FN*USaQL@6*moVtyj_FM#zj^L^)S zy{Yt}R+paGc40YXoCn9Zk#1_OM7q!#zEpY6)Pq8|k)QPO$))8BTC*QE+Sv)y?`{r# zK;DUbQU5lPp<_p+81fpNWu$_N-xj|L+Vx!hV@?#2GodkW?;Arh$pO4cX)l%Glw_99 zpkO|qo86nRyWZPnTn)U&!4b7e$w?Rr76M$C5t(QZGg%lB%4Rhw`pcOhp<(sNwbD7N z%9-+tMMR*`zR*< z%CZxSj<29Kg69yMCpO_*V)oZm>+}Wh`cZvY_jlDNNP4(5(hr{Td~L!U>h>PaFj13QFZM)lPy=v9 zw?B1|^jf>>53tqboUnvCDq7fi^ufLi>6;o$Coi4le;}rzd8HEB4TjcO3W*xGY1?bs zA2I3$JRx|rDg%WLNTOfxb)LR1|F^fguO-P#2>}`GKO7;%C_|rVv%%=*#mDHVZ&DX zXQt9*HFwpe{GOQy3(P1?x(i+>8n0&d+8~u)d#tK3|8slLc#xC~XkS70^?A(_Vi<-{ zzm`kw9t|0h?-j8AtJAI41yRv#Ro~N9o11Me^{WNlQI!2I!!o(A?%`J6NB*lXW~?O^Z@4X zVLGO%z$}Pu&I`RXl=*~GNw&P2fNoWi_3;@uipX=Jq{1V!<9<$xGlze!ny~cc8yrYI zHOqFIOQfB0pX>@!7l0!5!O84_F}pJd2bq_rA=Vxgo_y~mIb9uUD#wmp>weP40+D+q zuMuw?^@j++kziMRxVms#-{#>^v9yPRxf>XwjKA1hkdgHM@CY6Hs&BG{rQ;jcZMo-Q zgZ0IR-ov4f%u>(P@h!A511W4Wj5yJJHWB7ERonUjTk30fW^@OpOM8a&RHAtX2lQ^+ ztdhKxff?UO54=(Ki?x>GQA*&|d;@Cw_JT;L;KEwG$bJ>l6)mLYM zlraaHVN#bBO^4wUlf5I>1(6YJXcM<-{qKZz5cU}BD2WcBLypdv7_x%L>T69-lFgbJ zE=JdL-m%2$Wj(2s@t2z0(G(MU@x>+5HTk0_1F*Rpt_q1O*?zkJ{Y_%1^wm??PLgxE zaN?@dK&8JXL0?7I8^ut`hW?q+7UI1;>&r1aGLZfyuL)?AX&hgfFeb^9{?SdexVU)5 zd7$>k1g=cNtUlZJu*-!%i2{`+7QHq2z=+pVkfc1|k-+TYA=t}dXx?!wC@6VQ-bi5- zS!3YTmKAj_DH%)CT)61K+Ps_?@zMQ7O#~1h#=UE!QzRz3k+KlHTLbSY)vc4RBXA+Elf6Hjyp=2R~PeO9z>FY8KV(LI3#Vzr4XIdnB8se2?QfwzJ$L%9D9 zR~{s~e?GLjKX+`{k#9d-pKXY#R)Kjsf2tBTk3!}rl+Q|iEpY#B7L+BDT`6*;b(Cwh z<>!_Q=VGe{H4FbN)gS8b|2ZLGR9f=k;I=kz+ky_SUGF%;ohYIbZnWD`P~cl;4-?Sh z{66VvWLz{|cG7Wv$j_;ss_>#Qdp{0%7e8E+3}FJ~Ek_y;A=Mm$oNk7raV43OpYaBS zEy#Vp&xTPSc?}}od&I&Up0Yz_b*LPZV@swu{$~oGRqTKWyAo3 zHU`@b#m_YPY33Linl%a~0e*gdvrS3NMf)airqW!^%S|IF1ji)-H2e%L}GKP^G;so%Ot*Gf-q1wl@62wJQ#?U`>{%{OG({lv7NWJ9$rZAX(?*!3ZNx7ktbl~Q}hP9yg z7E683nC92XGAuH}Kz#(|6Fp%o9#SE`2_45ZD)vC`Otk$06VU+Iwc zZll(qW7Dw4lF||4_eACM<6ujs>y>66lV^&n?-#(<0on@AJ)W$}VkOp?z%OIX61Epq z5@Yr$uN%0Ms1hS0ZYhwW>UB6s-NO3mr&zo%W+$SZ>2CT@3;k|))y?hceWcfT^l}(E z!K{5gfN40KQHlebRUKri$$HFGzqn^6(kS#X1TV61_$5v#!xZ3$s&KSot+idzlHnGGay%Q<9i4TE`JXLE_P;x%npfV4v4nllKJ{Bq0PDZ8h0ueGGeagg1u@1{H<_zY5 zyW)t&18-!ci->5avXnj0We&)?duZ=5&jgaR8QvM6arKcpEd6Y_VV`8--1~^ouYMz= zCzA)@bN#p1HrQMQ_xrddawmGqxn2eFRHxdix|?Z1PjGzUyiTG)LL5x2q5BTBKd>x- zP_iHu`}7}3R-iHv^#?x7!T*6dvJ3w2*Z-x)|89nVkfHxC@&ceK9}D#6U-fGDp&0+M zBjqok0svZ@7H#$R+3fC@hQh@6FSU~&2h#)B%3}fEwVO?JLTlXDoeMB^P}UfURjHM{ zF~ALFmY!(F{F4afOJe)D%h*0+FY~GRZe+Z@*h!8vVc)ZWE%3Xf&0+<22p;HJv|&Vd zgZ`h5QXFf$^xo9YS=IC`1Al0;3lAS0iq^9~GFzMyC}vr+-l+iqOJ!?303;i49oska z`>{b|KntY@Kh1q@DoP@YoH4u1-2#Yztq244fD}t5g_=EQ=Su~&5*aj3Mp9qO{D5B$ z@m<4Af;y+HF&ol1_g7252Z}`{0Vb#ATjGk?juXR508}` zx^^BD#YnLY4{ z0k5etAJnfa+y%=>xWt{@iq5$8+ML|JGTw;(&YQE|+-n{nFsz_AN;76K#vof9YVTUJ z<1k3SJt!-6kO%^+{HBsd^4iVk8j+zJ5f6ES|76QK2^v+ZJQ@?irF^C|YNsAdhb&0! znQxnkk{msMf3EbSBCX2Gn~SaC?3&<47V-A%pMnDN6mu*3^kdDtS7%&N{4=}}4Tp=p z$3G#Mn3TWb_dNyxW9p3*ym*Bdba>}wkFNOY1hN?9QI(tBBqGAo&>KqcN~iNPP2z`> zVbSEeYSK57@I9#CrpU@sw%K^{Y?&IqHS=r|?6JRgKsYmpI32I*0+lXTu9@eESueJo z0)xdMf*})Ljz)|KuI{>+J*f^dKU-MaH)zO{-PY^O7AvZ%GR>Afd{-)G$h|J|^&uqG z{Mp8Zo7qgU^Xd>s;E7=V`9!wc%F#R%hB+}p#ppCvK`=`#Rv%^eeAZSAMMF+iMDOe#W$J0PnfFynHQm*B?r;cEOgy_LKE z(sa5i{~V#Z=F`SDa6XiyW{lz>sDOJ`C6tnkzY-)RuoPD`c&^D@DEl_%M|e`cMpFu4 zLQF4`m@DKKx|ydwKj>rx>kO>2wJQPn#u=w9Qzmul#_>D@1_+hz-HS93-hGY23L<4+ zSvsh8$wtuw$`}#+AjckH+g^X1OVy`;ln`(1g37q*Q(akm(Z<%~;AkssU#ZS=F|WSM zK}suN@A>$U=G)p=I&l}RovJbv`{C0cv^0)Bc%2it~ zAK;^-Y@TkFD=)WWua%5EFb%GW&85cm@Al0XN0^*cPOdI>uUKf9`Ib`k)lD^>RG4sx zn16ka&(5)NFf_XMfT-&p(ckA|W@~?DMnbmghCT(wskFBRt+25>aP*FGN!Un?#3l@E7)ez`T25Qva2X#THQA+--v2$E=)Xbc zT-V)5Y-_RJXLmd9nI5N)G!nL>2c2_lVyY7_m8Alaen59+`g8r>>pz=Ti5m5Z&6acg z$Jncm5-gve#Y9A`A}KKIcmz=cRVcg1XO zV}_B3J+Z+{NuShnP&XQ{Ef768N7b^-x~gg1Rtqg1CEkC1)2aFq6EDHfeJYk8s?J^E z026-GM)A=>Fje(IC$K>5O53r+ZnKJ(O1b6yI^JA_M^$T&Q5)7jORC|sTXrh7>uMR- zBb5cMyl)ERqPWL)LjMsgJMq@czCh??e-hbmEtRx#pX}|y_0ze=z4hunQX-Qls$043 zoDZ46t^>8z#VPdNP^7Cd(hUg7v)_J0M4nx>IqYVzJyJ8H@-OGNnEjBNrqh)xRifZW zZ`4-sKU~}5yTEVUrf<_{)nvI%rQN?CKmh&f0br(vzOm9Hu%haCk-9ZC0)>#cr0%E0 zz%3U3c8fUbK)qiAc6%dW=Oi5NVnro{c9M9%x1e zLqUBKzT0B!h;Xrgv0?pNe6~W&|7>Vyx~dXzK)d+oil3{4@?QguB=n>M%o4fXmpReh3HonY68kV4f7v{1d^*NFVVqQJ}PwTlsx-Jxd$$~rX{ z-&lx#TQO)gOTS+vd$HDHB6lI5$9Pd&3XUKdnBEetf%0;Yfqeg^9I#zI-e*=pOF`5L?9> zsW`wI%Z73U&y^M5R6;!x0}Qf+$}X0TZ*;oJ+Xvu!jtzZ)LZ26Uf&&9~^^4q`w~5u& z)%3>`mB9#-Ahvs^?UYY4NuINLPzG5Ap=UZ2?iBrJ6z*pzpy5k3#?y?he_dRKb;7Y+F)K_u>+CaSp z>@$3KCEkI!Z{TTE|Mi^+kJY$WCoEg`rqe+L$5o%eU1_(1 z+;Z#@M)t&Q4c?o@#(LF9@FetPzj8}BI#DsI5vS&xNOh#Doa;YHpg1-1V2~xY$Sh`K1?hgs|=_*&&S@YxqGVUNnTc68O)aPivbY$ss z8N;pym$90sqcaunaD!y}wz3AbVgS*LPwIx1{T^~fQ?d!%QB~mXD&#n$@H7)M&+1~0^3!^3 zQb@P^UV^jff`C=4){a7!>hoc}8cs4wB<5sjKI^APM67)|nXK?=?D(W5_xBa(#c5vC zS?w5IWfUW2`x*9P=5VPi&i_J_ZD)3Tq^Tg=>|JX2ua5tl3m_rj{F6zpn0M^)Zv_wT z)?BYIe+8#JJtjZ7JeIz^Za~z`|D*))@bTFAOPp1?i4-o z((Vol=n9Bri1(uR1*c7+LTzERxw9Ij%qwq!0=ACmrJKsNySvpfLSJ%EOdL>%i$9~y z7hNR`JFe|LmYkg53*5u8@C>PE$Izy8B_S5pXrv>iCRP(WOT7pBTVfW3fn579E!|p#wDb2t^;y{MR>+XS$CT1mV?dDflr;KwMs83l=UXK#~V@Gx-Cjm`!XF zgs_CVLeI;w%D9ilH6FU!H;B6P9in;LC%vmJPZ6SpB|`)E0JK@fs4IAW=VR&!5OKvsm7+l=(VZT2`23#`>v>=e#@$l)5sZ@n}0!Fmlc6`)X91Mv&nNv32kxB4t7Cy6lbRyzr9 zA2!*dySJeOKDaCWRaE0{$gJ~~1?r7gXANeLP+5lStdNL$4V$#g ze8!bKHfEeY&R;p7&1baNb<3kGylvOB&9j*-FzE!HNXez{d6==>tT7g&C-?<&=asq; zwUVsI3cb_CiEK6XwhFKec$B}y!q`Ve^OyPsy7GG~_ExD6E;DyfUVd~+NV0t!PlF=C zcYBC4k{@QTTsR&jz9bhwB&@0n;zkoxm$c!>syqmZMUqg4W_PAf#IyI#=1T;32d~2a z+!1WmW@o>;nD>upK$^yI`BZ}j`YZRaOX%(ViN#J&avu`(eh-?j*foR(=;;~tg=~W@ zdChS0MA(Vw1*VuUQGu#aE2raxJm-*0IAw#AWA#2c(Ta0)yzrBi^{2PbfaZC$W-V|x zzp;u2wdM7Ynl(>(v%eo@vea@B zzI5G;L%8&eFkP(AK03dr2Ttgjs?!l?%nDE&Jmhdq2hYwgGBWkoOA5E_4z=F5<5+4O zTweh|XO!59nA?bmlp-4Z4TU&f8j?O7an&Ze$2k?Ff9!XsoPt)dv;873wC=(uXn3J5 zu3PNMHI?}Rvv|^n4@VmgDrq=E=Qyn3hB4Gc?fjxsWLTq5sKXnE=b^tVLx^YB(S#Rf;=p6kfS((e#qb!@@4uCTi zN`t282W?`KrTCKH1EpYF*zxzzh&Qrj{#}x{U2MjN;GI=Vp?hr0JEKLYH&)nRcRqjQ z&P~tb{`Uv+EMk&>19LqJH@}L5KkLc56_d7@s51AI$vZCZl8%y=e*EL(1|~3acYkIB=Bu**;ed-1JlT}c7$9(&_&T=icaDQ zo9Hhppf__CJ8CbxNvpZtJU=FYXQ|H4CM+0%zr$e@*3xf{%PisKseM!Kr`m_Ykucpdt z_~oE$BX-MDSZz;6k$!r%2EL|#t3W#3TykhYnd0-uSD`P00oH-N=F_>Jmav!STBilRjoAT_p-P8Wku*c>-1-jd8ouqv^4q%kOmI@5u z5M^rqL_2=1s<|iVeapMTEOulwl0=?q+F#)1#k_uU0eip`2S-o%lTbDFaar$QayWY8 z<#l#fY0~;ljr61k2^Ubh4q*D|DE{n+X=#!ip@LL7E7&h2f$6sxw8#@5w?4W>M83YN zp%GB*8AOr{m(S;?9t6d7b=18I=1bG+zM1CX&QHo0h*GA6z4dRVDm$(Ft)}>0sHG>E z++)SLL>bCXiynM)8ktT8m~k9b{NAePf3v)v@q!yj%i` zn38|%TbXV(cfrJHS}G9sjpqQsgj2b%ch~kK`D}QBe<-mA6`Gt=OK01R8ej51d?fFD zwD{;(>~$y7W3!VS8(|mPr+ZjY@+^FmjUih>6vUCm@=0IaL{H$WN*eiyhtYAmpD%6X z=b!)xjkQ^JF#I+gCsVlneZ6K2)b<2J#-T`WZ)Q2PkfKf zF{Gg92*o=yx1IMS#j1WEM~=w)*NMM$jSw`*{-60h zh#Nc!H&E#j7Mz!a}|y*uAx zH`YUvZuD*Ohg5iC;thtPzc1cp{-5U!FZBK)C_TLo)mUd5Z;_KDo)^x-?_-S5tR`k( zicV(q_dGW=?D8hNP(EH$@?v>zHaBUtnF=fYYvPiE#)bq3H|%hMCGY{|RXtAvVW-}5 z3GW`=?yGpxr~T@^ek^NwG2MScO@7m5W17&Wd{=hll@d2))ORg#_7_o>=}*}1$d(Y8 z0F2ZJw*JkoEgV*O`@vBsrP5(+_?ItvzS1V#i8|NY^Ukl6-9YC=bCnBh@2`i1dGv5d z6-fD>hn_z&=NcjktakTToE_KVU^Vfi`cDjUtQIhbkQcqZwSqRRPzFlQ0EG_-8u*g& zm2kT`@Fywj36l^->+;6Gx$XNAI`8aM_~P@UvYfgO`Ji|{%Z5-cA|i4B>=8c7L!Cp~ zFM4QE?PQLZa``j$dC%;;8y8E;Mg;T{XcF@*O4AQpN+Hi5c zzsYa38yr*kF?`76d$TWDKFf!@q^fIK3BSb!Wcro1-mV5dpzy-*gp^Xoi#)#|;|P6m z1LoZkND;Xh{VtS7D$lnU^cz-C{FG;kMm#{p=tr;}sXRI9bG`kSIl4K`H1hY%m}p8V zKg}fu59?QXK~nj4zXMvSaPa(+c6l=E$ z5y`n<8~nkVeA+zf_n3dL1}^zTYV$<|_fQeBzl~C_`s|vKQfVpo@my) z>s$aJT)ds08J-rJHDPg$Xk^@elENsd)7axRKFh-4$#mtzq6r=@e`S>)DQRw4-@>)U zOfKZHD1u?Sr|re#mlE!7r&vP~~}PD?OMNYa*nUSN{-&Fy+KztwWmo$UKzDm}`&V~kN^eDxGh#L2tz>klH z;ZC>ZakG))Ap_1$!zC~MDs!l((rGz2(Fb{W2p=y!sPNFIF|@MVWTNWrdSvsNmZudha`Ev^##u zN0yUw`QLPz`H9H)ocI%ZiQ)+4C#edYEpEAZCGLdx1Wm%C_IOK^)ljRdx~Z9PD^0T1 zRh58HQjg_Eo+-J{(_#ofDx6{_*admz^BA^xS@87qPP&WpYQj7p^RXe8lD`$uK+pdp zrBGV7_S8KW!M~M_*AcK3RvntxK*u9AU|q_S`!qJ{)qC1q_>5wXCKkT&*S!cwUDHen zyB9Jd*)pPmp0p$P9fZJdl zdpx)M3T$>ER7HNIo%z>vX`n}=E;@KyP0Vze^50}z{|$s2e)nDTN^01FK+Tw$kWfmb8%YI8=?>|VZkFy4De0E(j%5V|q`P71 z?q=Ctxa;?K?{oj%XV0FqXW}#S&O0-=JA$N0y`wUH{D4J|f31D_X|Fl^3GsYqYXUlL zu7DKDz#}b-^t!Nie`5#%MeVyMy3p?>2UkVe)!7nw66y*acTcUaT;9FRmbS`o2U~Z2`t(`6YX2{?Vm*BtuS$Hkvp7k<7*fG~xLCIIV*F$8%gyFET%k5Zjt$Co^ zX|*HpH$B$PeWZVz&Rb;2$n)>2(x-jzC6$(Q=^zjI$1&4qJ#g19A&g8KQV*^GfR$Wc z12a{uBvyU^{(r9{zAMh}h$f)@H7$Vd+6{4ZS~Bw!C@OnbO~WG4E>HJa1eqB8@5M!e zNdlIV@DEy(RG0K056ok`8@HMpNE*ahs7e4qY%ljKdBTvgDg5uPsGO7bB$(&oJMUu^ zXcRR@48Gp;P$xKyp~;1S)r*%tfeCJr8zugyF>aHv;f)f|a)hAUv&r}ir+6!f*o)Ak zm04p)7U8M{@?U9$=<&}xnY*b#p!vdv|Ryrs8{_Zz$}-3Q$c{ zuk-Ka;Xk0r9eda>eJ+rb&Vw48M2v+3K$YxZ7rIiH5OX{w3!=Jx8{^Kh5`P}AqWDEaw{)u^o^36r(kKA5izl$}P5%;#iN2@qFs}<%@ zx0M_LCnG5-skEDpuQX}($Tx9=|L-^U3uYqXNv9mLd2nb{moE1_%pE4 zqN1WME-nm7JaW-zKoKPadRS!9Nae=(&)D9HdH!{aHDv@yE^iT?m@o3ra5a$%TD5bW z)e)dx=TlK3twHrE@V|rqlhbs7aO(8fc}VXZhG3$)+AHj7F%#$ox6vA}WYvn4 zKkO|Rmuw`H=B*-ylJie{@?)@zOCHyH#i$uq9tZ3nch^BqBzS&9 zF^f2K++ooN-0ua!*#H-2lO<5X3fQHVO5(>@+3k@?_LSmf5Kphl9$0J+&CLLrS>`++ z03);a0T)&~rLkk^LF?}LL4ziGXk4r95E_+@Sz&rIe=GXM^TR#qFwL?arJ;Cg(9Nf(Yy*Yb%d=sh>N6UnPoM;<9?4c7| zfi0ZF&IVLcf2TLn9FYI2&J_+pxb2Onr#%R1Ua!w+s=Nu~5l-})_*LV#y@5%}^eL{X zc(lH;?+$eTXH#`8ep-iN&+p4X0ySYlD%=qKN_cH-|466-6N^gN!w3Gdwgr=Z259>9 z&!fKek?U^xGJk`j@JSx^uqZV06WcFlexT=FChaMkz`&CwdDqRa3QQ1O&D(I#HdI@N z!^_11PQBaw(CM@W?|Kp0!)0^(1fujI3hdg|=?`}bfHStp0lZZ=TMuG5!LQ#)_BDhP ziGX2;N23up`Mo8^%_h6G*0f`)*`SLOC*Zi?qBTBs<93E!$?NbpNH)9Rh`Cvwa_6^#N(xWiy4nXqN)VgUywPF1?>p+7>{*=?j^G9C^8lI;U75k?N z@$;&N+>!?&>F{al<-i*W*3K~*)~A(IT^e)sSWGrnC?$U?0=ou$9K za{yb(1a4;ic954{?@5hQwau5fF4G$@4b|X|<7&=oK`d29wJ9JFUAKO|9+MV4RtoA6 zkadbWB|Vv{4bHkhOi}6F67$yfUzyQ$&#f^^()6%|*YSTvoJ~KSPJKztu(n2x-2L@K zYz+(@qUE`kdaS7?Q>*cb@o4lPP@V; zqt2Env4jH}*G{ug+Gbk9Au4-kA7OJu~-nkH=#6>Q{RQM@Mv%56Cco zVT7YeQ&ZED;PTQnvr`UJuB_Pi1|WGtBCROx;2uZw$7HaScWwVZKWHg}f#s-*-MZ(C zSxPj($;lyd{v4`3PQmkfB__?M`2Ju-94G!NDUO_o;myN%YPL6%Tu=t(rQPQfw8ly% z2jFo{mhfW^%TxOKR?QJ`EyYH7etaJ=<==x(ENikz)8XkeEV^(NLU0KD-)Szy^U8fr zTCge0Bia9`6J;HZkp%9VbN6^hqmS6C$(R%%b?I~w-`vRa^Ey@_e-Yj1tiu| zFiARq1_O4bpPO)$A~w=9DSsLH9Ly|wf&v!d5_8o7w^i9qbglQ#=mUAKRICDRkiOoW zl)z*UU&`793H%{5APrgt`$Wehw*%x%0w@|DBV^2=n=(B9GZt@WruV*%Y4PNo$K|~L zgMM0ULUIjc5eF5ZTDy1>dZ(!nh(h3N&wOIa4G0|6(sit98luE`-{RNNF*7B8_pz4g z*%)Qwe(n{N@(c(r>!GWm^k22721r)C*?Z08oYoJm9IhsGh{rdp=le=~U$y<`kvg-V zqvPg)JSNTI{&qcF7@?cXFvGZn8_7^xxrGA?qh%43FdSmZs+nU9oqz`YclxON&K$Ik zjhD%m4AHpFg&pNZ^dyZoqt93Bs9PZ#{?Z6Mq#ksswl)+UXw1CS54-Gq8!*?nGe1)I zb-74=I_k%6SF?=D^A_5X96<|6wEpMMzjw1J)TsMYEU5@$>~1VSP0i#pDpp*+rPmp5 zcma@%Yegj0M6VfKj%2z+a_z+(Er>RGp8?3i6RYbk)8? zol|Cpwys`<7|LPMCxra>%)~;4enIBA$J1|04LObj_PY0q5+L7}W6svFD__0vrWJqq zgBejtP5Q!?kJu3myNR-^@xNcKt8&ovmGHeRHhx%o!NyCicXz9(aC9?o5HJrW`SxdS zuJx%Zx<2;`=z~D)Kf%Dz1cVj0>UW!&hY_E`S;xRjT$MGrFhxfn;o3C@K-oE7@EJHnN=Pv z%S={Rka@|<0X$*as_VQx5mY|dsrbTH%uY|Y^kNkECJtlyV98l#$Oa_srhu&|n^{-f zRTp=GGvnOW=FW{P|>OhpDNVm$zAO36_9W+o`@)Jk4567_Qpw zXj^V)&$IN}rD{yRVqI4XP-@M?r#auK^DGfiGI77(nesj!1u*Hz-84JTuT!LMyheaK z4p2pY@bBk-R)UZt!=zp@^HABu>cY59(^^%9x6(Hiwy5of$Qfdh=bVe*5Fmg(i;Am zyLL3&Isxr$q5Y~~ja6H}On7-Ia7zN-HS>`zkFAhBi^(tRpO~>I39hRfT3F}}30j@d zGq*lzU$HLZ;3!9y{Qk;XQ9tcY?Kh2qOxHhPdGqc{2>_7siyf7UEwy}`(2TEisU6~I zdRVs3HS9DKt`zqVh`{_WsHgFfo5=sxj@6S+jx}zH**qq?FI7vj!c0C{yR_; zKekNIL!xQ(g36Gl)k$x5{{aiv>RwAi`H zI5L((nFIeb-uaW`OP&Hm1^H(U*Wre1FBtw;y?Y3-KvM;IbmD7pRsMaXcAnBO3=YT<>B6BG^F&{yoyvF+aa#4Y+y*WneFJTK(ih`;Q6y}5XtQ90WcL1VN5IUd7`Y65 zzo&kA@?SIonp__<2{T?VKPbLcjl?z zF{no1cp+WahMOnvGtE;%2g?Mm=L$@=0YL>OBTYKJS!-Y0`Ko^m{eX3&xbXDPV}8IV zeR9@$9g#<_0>3k6110!DufhCjAHP)V+<3XPPE!+_G#NeG9EiJ%7{dlIG4)3KGkStE zcFWhm0dGA+Op;!Y7}UEwMib+*TuSI5TSPksTJ zqmI^d;@91K~W5~dnSjZ zNXNkNpM#+vd$jPYA(gygW+X8p`Ko)jAg1E)-Z@zL$RXgi2yYbkYvan$uW!Yf1bK5v~sXdDFq6UJh+KzR`^3g1`0K_uxr`>6>2E1I@?~S~V z>r`&FoBwt}W+~u%c6aQGzE|U7TBv~3DG+7Aqp?`6MaOnw{F;P?{WZ7G-K_YTB9qorLkpkW*ZS&%DH0fQ7s4 zPCb{$OuB>rN?Xa1^H{=_snbd3`h+0T>4*Jy&PQF*e*&%UF{#9vw$@7wXMUDyac2cc z(kgohsmk;#>$?naJhx)LFZI%@)`*Hoa8=)`u+k`$_=HfA^R;puJMtIz_si$T5{99*Mqncph)_NvOE8eC*wA1nXV z0+-w4DDgF$p{I^;e`6KhF1`*4qee(&S$>swYNy4)6H>AIUAo*J0mJH6H82XgDCw_&V_Zb&^5 z)3YP+g>qoM!$~*(?4tfTQfo@lob0;~EPQ}P2L0d_JD~SMy*N40kV7fkc@&Fd5pk4X z>E~~wY{T;!UpX%BdfF>2^o0`s?%{jIjH=Wi{u*#X%8S}{6e(-E-;e*`%#UNpV^kK6 z>ftzhRGLQTa=QTqjNC$Mk55%hbc0UvvNS057)DpPQD>I}{8WExQXy;|Tr;aoZ4lNEQ4-cj9-m7fZV@2=Y5Vcg0B1$Nk2Q)54cmYn2?U*X9^t$ zKh$ZqKpp)a%VzEHxz(im7%Ywnt) z&m!$=G9_H6;LiZvbG5*|MDr#LYc@D^*l7mdBg@!79$z6c^|P+7c2{PMzOHrBq)BQy zkv7MlY)FeUi`!W%dO@r4Hr*3)|7Zd-)Cj7(SMAS<)}vI#F{z)WT?2!w8fWpOGV2(8 zpHN)tAyXG3(I$2_tfPY?)n$B<0Mwth<(Ccb?PBbfW!4d9@+z%vV(^R6Pj^WTu8Le!nMUJUWoM%dWNo|``Y#5U z^r`ZWQ#J1IM2KEI@D2iD48?edoceWgEGjx3W=AcBW%%wt5R*2q#1c(S zL0HA3&fNO9>xhuy6keAAQXNSCUHq?*J0baB9*4n7^3|w2F)`?^w!rJ(Ln)A;Zltrg z_TznlES(7hYajWL;J51A$jhA=8SK(7KCs6ouhw$|MO?&CqSaB`1&5gk?#KO0Y&%wq15J&=`RJSGbYtsqCe8J zjUi@2py0Ieb+x1^)UZw^eV20m(ai%n1wScFB$4;UT`JmwzU>vhy@XeN-$>n1-adWE zw0Ur2r-{H^^0vjvaMgsqG9MvX`V`lDXH^BW$h6=l3d$JQ1j{{Qfs7OOzkWSLU#63K z{8DB#l5*42{RTUV=jL22@89U zK(Q~rra9%#ZI-cYg-hJsO^+&>b}=o{kS$P#^i?J=FNd(I(g?2XqON>G8egXG0+Mc| z%JT1U=9PRy=hm+zpai?zLw2H*B6~(S-QhcFFsD(dIiqA6oG~P@&b8XG*~BeU?^!yM zAIrB_(vstUQ_}hNi}e&-6MxO+Q{%$akPkYyw?yZ|!n6eOa^-sqWL3T{ZCtPZIy|a5 zA=Pn3-xP^D;XKW#HIN#J^KokQD(EMi(G9dvosLL6(=lBC-Zt!LC@Szt$w8){k7n(9 z<0#i&)NP6`8jv?zG-B*A?tnj{f^@h6$~F!avd2Gf3xZ4ATEtrI?E#D#WbR&hDxLoC zJBL)z!s|OUh$hSa;JIj(BIn_u_*TR(L49rG{(_)`*bnHE&zHb zt-FXz`1HZGy31m)^Mjrl`kQ%;+MOgmTBik|6j$`W`XlLo2J~e9frL!N=lDLEjw^?l z-5m;qx3=Yp$(Le`gAqBelUDUw?n-F!4|*|1vv|dI#?&*5+XSuFAhB>cHZ_z#QB`Ii!|VewknvpDi(D$~J7S7SA7>98SxN(qj?V%1g! zR%16}H*n`Pni*Jsg9;Lu`JM1)*u0p$e9`s4uA}3tqL=i7!w3^uepiDW2*Yc-w( z^Mj@$7jipX)ojbtL&m*Y0|_V9kR=c{r0x6qgR4`!_guYhpDZ*5GxNpX)hf~h=8T41 z;hP4&lQew#5wqb_K^lY17?SW~b-ZB`q_JidY=@s;RA>{!o14BPQ@~E(c3wVC=BT<} z=ML!@36+4y64S;rFRolV=t`_gZa{#8oU6`tYYM)&sFJ>KXXz(n@8r~(S+sXJ4yyK3 z1U5g7AX=_6k!{Dnjx70~TB;jsU!Ns7L6%vug6TePwEFclUCCK*qn^oQs^q@PU%NdW zkZ@$P=i@tm@)~1wV()lwdNYa90ctZ@o?mVVEwjs2K{kz|R?dky_DmgF#@3(Q8(haI zYzp;5R1FVQTRx-YAJo}&H=E}^olmzrgy|8e%N37PrvGSF&RiS8Pw#}6LxTna9-0%e zpI@Kqom3LX``^H)EgQ%IuRc5caaS=EpOkkbwzG#x1nk!KnVAMYFHJ3GH=<#_`LU?% zZ@G}iwD-5Y)4t!b!8y-<7LQzlE!`?bnBLY)yad;42q3j_3HdF~6aoiN&E2 zzvHnh?1TQJbI~_{^T}~1(AOrtH^uK+FF=2irbD{u*klO}nUSWi-A#*7afJq$;~`Z>nWdJN|lNrnOKDA-_&c<4Irwg?lgM;09bC|0h6m$aTHkm zcC$ppR`OSqcFUry5CG743f(^dp7H~l_~*&(dnQ-Uz~31uPKHABV;uc2FQI9ObLsvM zVR}US<`Qi?TUL<8nwE@8_pcur7Ru{AYy5Yrl$b0Vx~JG{Rxg{{{2Ra6g?tIJ?rr=z zb>0+{#GnTv9p*x4T`xipr|aqJwVSd$>IZ^nALxFUgUqJB{6N)Hu^$|?NkWf!0}3PZ ztzgrul)O>(Gg$LFV3kN$>1}pet5MGt)S9=%tkq{nt~sM66Y|z-?}X3e%AYXvy=ebB zi)C?x*Pt@9P7r2%Uf^B$#Ta-g5pj4IaQi^{S=op(aEi@k+v2$#{(DFNi)BzonawkR z5J&r1OzMbM?OXTc%7-BjbFJUb3HJ!`U ztiT$7`ZzRT)KhmBcf&R%woc%v>I8Aippy~z+l#F&$7h>--5>g0P}cl(NyP{b4`hwY za-OAT+vPB@c_&jFA|HDBYQ(**-h|euQKyRJB|xT@vMSiHfB01$QH|brGm+bkgyk!{ zKc!zflKaMux=b{F$_+(MG+Nx~KUI{MBqw|{bla0SQ(%L`jk%uYxOc+;7IoQdr9!?T z*l!-W)*kKUN7KJ#Vb$uLg$`;Pw0vuHI&iX^N2FJz$y^f7IW{lZpK#QXR|eCiMfvUQ za(O)NPFVG!(h>p}NAFgJ+5=WIfKNJK1tO$o&OfT(bqjET;PMrQiQ*f#@3{>TZEG@Y;?AC#`{M!8hZI!a#ohEZvHI)h$Hlv zNET*i$R}$;bK93n9Go(AU~6VuEZ^H3BxS+;r4(y%ycI}uSLqD2>)LmWZC=yqaCs~Q zoAbZlW$B>y;R683*3EgfR}l9ny(t!pOrsjb>UFVUJ1c@_@9D6ka_Tm3^)36F;Ae#` zbEiX&yvtIh-RvJ<+2*`MZVUMcdPSE?ek^7jo%BtHp&?U2F`k-@6;OJGvn=L*k&-Cg zQ?Kl0!%M|9rB%Behgz6;sg8lXU$W?MI)~nxbKwA9jHdT^`)PcloHu;po$<*GrK7`& zD&enb>lAecLbbO$g5Tv{7OH{|+Q44SMFKdlrS1z9C^3giJ zu@NoQ{p|yNJ+AZZYHHYw;x6C@GJn)?bn}5QW|rLr$Ec^JMW!IRzH*XX=Oi9X`F!ec zQCN$(*U@XkQ&@%ZdQBSOB^&&+w?< zJ3Z9?oC&(AH$;FVWvA(%w+8pk z?b;)Rh41BfLo+L*Zyzw&8C9Y?Y-hz{dKJLBh$T!gZOw@~xyqa8xq{G)lr$Fp6ZrN? z6PTvbz7CDdftYyLM|_rxRi-zMmyMo5tE_=^ZdQPTdfG5=yixxYY!}Xa1xhp0&s3XW zjs%#5h1c^72Us^s9J6Eg5E^l!WOm!0dFefSN22`|evQB;k^J@jg0QFFWG_;wvW8bX zIv>(KDXbd+q6#h~ezadyGmnGJ-n~F3Y^+G*0!WrIC5Gr45^(bdtxEYm;vv^&U_Dny zp9~`?4mY_#3pGe7y7F<`Uw^(D9T1)4ynQJ8U2Q2K$)i$g`APl_`jT^nKU1Rm+h~U3 zf{$e%mw|+`Z=;=9Q=kCAdhzE^nS}V$ zQ+b2>#@-QzwJ4N*A1o>nwrwZ+J(K6;pee^3PbJZ%x`-403y0o*W_$8e|Eh$O=yxmS zEZ5mAMStbT>#NgEev6M^Uo0S9ZWTuazvG)6UaJH5=XL=j^-5Xpt8{<;VVPU~Iq|Eg zI&bxH@M!3@l?~ixW5Yp8C}|@<_w}`#ZI0?L%YWRO+NxWNeFwTN5vM8ZB9|yTfZ6RI z@AD5kZ`wNU;98AI=Ty>wue{dE6SWKHhQ>7rW33NmMg&W=pD`YjXj`o|S|9Kop?uS& zv5^UdGnVC*@9QVcojRNiMh08fR@7ub44X{_vkO{=U+kLE0MG05{?vU*Rk|ec@N3!g zs(4HH7NTNpHzkmKIzE~$?tWYu=322}f7V*j&s-NFK3Gw8nd>`^d$0tjl~`8S8O1dU zxaxeO>bbS%+fMqLCtiY^Cn3w!KXz78qf>-}vdUFj5?no)K%T=#R$2Wy}{f z_;4{z>-zrDZ|^TkJj%LcIH<8PdP1+6BRJ?PL~F@y6JY(_V9eHWY< zJs;m1Q_}A&I9jajR000pHk}63X%0;|k8oZb`tjXGna>|Yqm7qEJ4Q%_zr(_s~`25_V`?a@l~{VZ-7iMeGe_zlC5njK%|>^Se0N#r(DUuz%PVr?E?1#78_&);=HK{6T;6 zG1h-sZamZZDoHxyP|*{9{~>j4VK{Znp8qQi>)((MNk60urDl1myFBlAPyqyWmxVwH z(8k2>(<4jwQ?pIWlj~QaJ=(vHKF~1yh6-OlJ(=2B4J*XMtyR9Kao6SG9;XvpPV*!5 zzzR%VeQKXTtk=L30DBntUcqU6pJ27;EIR+Q<#ZTVBz~5cOt>G7^}~``a}3EBy9UUN z&>prVTHRJ!vGrF?#%hH8dsP5IRk(QttvC3!^GJaVZFsuRa_?3kYG9iv5SCf7_O}`Z zfL+RH{zIMRPbfc*;%SUA;8h_1ncmZ))f&~(LvNZ|fcBWd?QbkK<~>`yfG6lK2-o4I zwsHIRP##Cb{8?269)-7TbPYa_*Ca>6c#|4{&TLTeZnXjQb?b0X{HDvHyYPuZ6Ir5u z;{q|h+PHN&n;5*R)*lQ2Zo|&v`I~1b^#Fj^dDzzc84eyX-4phecFW+6qqE+c9Er0W zD^7E!AG`21uwDxXcv07oE>@CeK%Tj;mrH7oJpskW`#itq^K7pD$k$6|c)%^H~* zK4j-71}5b`V|>`4)n!$OmD|<9UBa}ZBk!<5X7O-xAAwK%_u7-9yG^ur;43{_n#gC(DkfZtK)dyyu%kS2Dl7ciF?;e*}^^{fzg`>?_R6_W6j9~ zMQ>i=R}T#?;RB57XH2KnL;Sz8qjXPvd+)l|U7(lrk?%(mt)SkP|Z zpTvU@sTom}w@XT+#n1@6ud{E1&8Fn~Cz3EdsLSe5&;dCYI0=f1G=%v{JiFlqS<*WM zWR#R<9D-g&MZq!If(+vCB{$A*oy1!GuP)RJ4M~#Ui;6KBuvk=$GU;~Q?#y~>*kTm? z2*YhNtGtV%KEl^n0$|nRSV&$^gVX>Pr}O|PnwsgDQ{Fd*(4^*e#a{GUUD=eQ^6&HhJ)3v zIm;*92#B~z=dp63LhfoBuK$Oq4kKHoLQQqUzuzveevnr`eB^R_uJ*HRtRl(5L4{Uy ze^c%mAh|NxN(VqzeSTqKi1;Sse@-g*DlQIRRY9zasK?d&+v6fa+WgT`mDNXV{8Y(7 zX{X$i!57&$OUehj{r#(diCNq8Xr~bLxS^;ne@meu?psiwzp(lByHSEp&TAD772U}r z-Su|up*z1K%!XnA75i^TNy`z}9!}m}7qD4k6L&;HQj75gusgAA2yzPanxgH0|3-Bl zk3z#smPR=OO+Q_nwNtvzU~s zhn7oASnHs~+*xp?Z!<&D%w6F{F=;HO{CNEkys85qWy>d^q2ZO|$%(ZC_reg~d^CSr z!tTS6kAKqSAdYPqKJCUs#Is_TvW96v<+G~#eX`9*B>w{d z@b39sL(L2kV;z61a~Kyg3?Yy5%~pON?m%RS6raGyUoq>30Zvcu)Ir$R@*2X_*%N?W z(Nq)J)`0Q1OUw9OeI%gRrWc7vNW$36SjbgEcsR7>TLC4@FA%;O8X(-orGvJ&6+RMK zkWHK|>#aPb^MW(?J3;EZtmy>SuoMMs9RuJS_pvedxZSxeSy*Lt?!Dsc_aJ9M$r^+S zO0PzfNLiAZzt|7x$40fW%RlE8JJ>{I4DC5Wuk=_vZP9+JJs0daa^g|Ouq`>`b@NDW z_s4T`7wBD(VP3mgSmO-#Y!ji#zdEeM#$=ob4CHz*Wp#<{TWo~EV#I&9!>yD)7bH2n z#u?)>T2@_etjTAAytHjgYjDz%8?!>sJSuU0wry(#-l~<2ZsqDeGB;+4iGl9Z+cB;Z(ljRUY_(-midWZV2H309dnD28Mr25OV!QZg->c z$dOsGuhj?1VHq|)5C*+NztImJnF;M1?9`F+6Qpton|Ol^G0W{2VWZ?)lRM9;OZ(_> z*nC756uo6Neyds}4ja?J?wu9F8eUrPvDipQ4PRBgevd_{l#u)XxB#Cl8&N<3f>62h zu9wv(4IvFzkC8v~tRS7U_O-3;U($ah#A3VDOPPqGT~wbE8$J`Yt!VBzmO!6AAA=Mh4TVzOB~&q|$yjoE3!< zrJ_0&$vtSG*?&gP#7Rp!fj*n&74q_LF9GWBUqc{HHFRM^++Zf0Vtvfs=RGGarc&Se znPzr+&(Mofy+>lExR4$rF@!&soXOO;YI}|nt<2S>#%S$F;*DZV-=eUX`kq%pStc<$ z$TE>}4GUM^kmj!M&2Jxc8N7!MaFQ0Q#T)5@$6ZX>eFJa5`|sF6$|}!&x?eH}Cbiw= z8Q~ah_=wTA92vK){p+m%;)}2=+g=%wG7rR$rtDfDW9z#i*5^?$!Ko3BWsIUpikC>bWomVTQq!#E%l^e}|PI91Jp_<0;W+(lA_DT|qx zp4n9x_O%-Th8$~`s#a@~ob^mS9F;5zE6~<;(>MZB-`|LTe}b!#E!XnQU6rznz%8nuYaX|TGJ}H$I8(64mU}<9ufde za)+^1Qpase(g@v0GYPO~WzdwbhaL7*(SpxU+gEVZRA0v$diR!lZ<%$<*uE7)mJ`MU zYO9WCtZ-1|ig75pk1}?KnTwyXjXVsYWp*L1LyYm~4}?_1OFrj#P7f_IGPX7oEdzC6 zi!IAIluCF?DPhdYnP9Vv$~tag^qG{eexTiu+`F9?J#@Z5N+93a%eG#rSwj7jL|msX zc>mdezKD?mYOLN%q0?Q>`4I}5;-sIrv%1J7boYp}_*pxa1d!=$MVg2fDqq_1!vjJ< zV!^9`Yt`A`kH_R$`_GFPlz%D;ih-Vu-%_-?F^{rv-+kB*MmADU(`Ef_v2P|yQ&b#` z#4z!5x(bo~5vI-SquB(c%pg7Fo09*UT&(o{s*lj2zuQkhlhfG@Kq8R^f6c zpvBUXK>xael>d~ywSfgFDI=WID32*?S~qXEYjx6cB}2#ZPmnRg&RAE1AZ_QXPW4^o zCw#@fp)#aC704a{1BYq1)h83GLHB?e55` zFL2G!jiCld865mfaP1sP0S9g_4SsXmyY*2TN~SzhYdD|EaPRY=1U$P}YCjn3NHvn+ zF0J43jT@;OFu?s3^LW-?_NWx`v|=(-G_LE{3-0}F5wpfx^9k&HQ(kvNo;l~R)B9@- z`~cDR(Qm)&mZcb&IgD@q7MT7T zSzX3OIdDB~)uxZO6MraN2`KM=8`Y@R5!6nyIwe(pW&78%CPK=)#Jdh}7j9j-pRph_+hxNKKMs9!R*iL< zU@>=gNOC^8?C-p?v{9v5Tl*VJ7V{?Vk!v1)H9@md4sH>n+z?hBqtU^suSU|_q{I6) zXtFm#JDkQc+g$3yuPMSZ5fWQOs6rS|Iv6BKG{b(|wx@Ll3w+6>Pr)t{F(VSONJWpM$ zaGeJ|t_*`M7W*dBxjOxhiWW?olGS|`7a_Su0S8s1Pz(S`(Fao`P0s7?$HfIjw{?k3 zA@Lm=E8W-4U`lNWK`Qiu@+=^;12NB+zTDc}kMUs!kHh;i*ArJ|Mm8e)xTudPiI}`b ztYLQmZHN0T)9BH?Pl*V)WH^QF7k?Z`lTWFJS<2sw5eYB#6F90)877GT=scS2fGoIo z&YILCvDXNAZwNEJ^UQYoWr4Al#LXqiP#kFI7V}qpdY#ki>E>3Gk>6n_Wi>UChU@FC z@0#;Bo=#!r#-RHIr;$;PDxXyBv8^s&=%M15J{6b!e9tpIXztP_Ge|f8zOcs=VP43k zSRdd`Xb+$ZtgGE_*>e)HF0Xk1Gkv0)C3ajii$a2VE|r5;)UwCx;;*({j<;u;%=?pS z%(HZ14~0`&hj&hW!50s-NL(=@yAKR{+GaMhad1T0^qe>!bDdXF*Z_-iUM7(g9pZs6 z`a5!Is+#^F#*{~aZrvtNTrm-%?N?#<(&I)Qhb0P#sGADf#kUzFB>b$WrA!yapQIz8 z6B4e&9%;b31i;^)d4n-O_=b8U<^q~IOXi^5?XWPPZ?CfU3M6lWHG#Xt=wwnlCgqh5 zy1#yz;+*lOH+cUA;)51{c_L?S$B-0iF757q4sv!z34~e&B3t~WvC%i@$lOZw)!6D% zd1h`(rC!8Zf4jXS5|&zSo@7`uv}@?m-=sXNd>RmuEWA{2oatD3BFfpXR6waQ0~HcC zwQ-(lk-kYQ7rXZl;EZ5#1~MV0w(L|~qrXBzqLJSHFBtaYW$ z{_tC@5&(nx{p<&qp%BJhBjIq^4phLC{BafkFwLNk!G$>MB%)H~0m*wb5n`5J%OC6PadH(Jp^MyuU9^GM_JNhDS z0V{HA<-ziC<*cm(K`F36m@hBia(Hfp=;lU2uonEwWM<)eX^}|?(Cf2XOo9TA38zH* z+N0TEU=v~D(xLvd2K3NWx$=+7=T_tmWpVR;WuLYnZ$Zy^H?X6e-Hk*EJ~TGYXf&f% zq((#x;49!YY1j*?qw{wd)-H3rI2X4$=-rF2+o^=XJEUh*8*(i?#(Bts9*7dvUp@;l zaWY|No~)|Tg`p>^cdYMr%4CfR0=&30unqu50UGKYviU;pLdQeGFsfGuGHea*=V z?Y@{zkD7u_O@fQ^)=O7XnP;-8&$0e8>p$mMZQ+IGjEIjz5!|0LnA2HYm6MpqNA^yJ ze~gTH?;g1Arte*4xprCQ9Dv`J>&%Gek!Kv~U0s2>Yq^H1O9cV#$;}zU;?3w%fr;5c|(HcwuF$wkfw|dump|ZP^v91#z4s-*OYPl#lzMWqcb)DRCcfUVhE)4!_Ju z@Vlx3o_$PbQ+c`;Z~{-kxAr994f4WC(jk#AaHu>X0^qxlD?P`y!!;mG>hRo7Bet=A zzx=8Obg;&WC*eL^a^_`5xy52@6T)uS`m^M(UXNLp*Dr7fC>KwbFk3W6rSGq7$=4bOmE)X%N0^H5l z901(}dm3N1W#Z9vo{m)|Zx*HTQ8nBi3rvoISD*gIKcXs3B?NTi`(AJpj0w%j6eiIG zkaLgy9Bkb4hRlyxznqmnn}=IQeYX9<%|y{gQtfjFUb~*$I;hhJdmNn^uZa9?Qb%MX z-e?hBP;kaYx?O6byU$G}DX0jd>R>692ziHOq<+NvVSTjdPFFqe3(JM)J}p4)lvMd= zH=Qa4fhUWPCJS^7%5r~q@QOCO*Dtq7avpY{7qWn_${YJ$B+Q#nmFLd*?_~lXyW`RkGesA!ul>Qm+Y1Sa*^BF{r^m$@NYinG@#0kF7ZXTl-wpaZGSzuIKzhDdyxF1HmyW^&A!06{Hd=BBn9#i|p}FP})Yb;IPG%yO46 zek>&kf69UL2$R;SN#JumGX5keB>6=A8Os#AK9qn2`$G?d;_)$6dCv^B?+&irdE?G? ze@hNzhFjE8L5H3~-;=(jFP7LY%E`7yr|$B&|NJ?o12d{tO0M?_X0=e1g!?iOxaY}r zO5OPErF+b%KIXDO7NmB)C10s3L`~S#kk0MA3xtVDxD~77z=5!-3Hnpf?x)Og($z0okO{dkg?X=W1-z9|BYQ})i z5xHGW2v<^rhgEs~Y4fC<0Yg|SkGMa=BA>GZ8Gtc4r-S@laPq_DoSImih)>?bj{;s`bTss%is}U8n z>yBlu@yqnu&3aePrOFvC)OU5u&U~CFA}j8S&a&L=&!E@7y1cOZ%+>RVO#Y5iiqDAH zK?h0k26Et;R){ME>l3RdGO2IH*43Gw))9Hceig2``eI$YOjWl!X9d1Nry2!0mP!R` znbz$3&7;KHK8=2Y1>@TG3H-3*?_&i07URGjr^{+)cq)ws2 z_KtQ?mchiF1k7p%S%KIJY1>e%G>??OzZI&h(ir+TiG;(*YMMLrnHcCf?_XD<7%}v- zO0)*wp#K>a>V>0GgQrUTiMVq)F=^{hPX{UGSg8ugfckbWxgSz@xa~ETcKX85tS{tG zc!AaZIQFw6)Rk7`xw4+$1=6eLeHgI*91VGN6`Ikghc0*soqW2Sd2(EX3`P0WY?7`+ zrx{#N&G=!*Dy9~dP$rI*#3N?zD`)PB|QeAoyPYIe7zt&e9B-!7m9e@?t+!e6O%hIEW~{5|KJm%AB& z?sr`2NzY@VBt=cW)Hi&#fPrP1TUw)ri>6BO5a-c*y z_k?BJsG4>YI&!q1pQ;DB^u0HniBc21sc!5zQahL&+FN(B_!pG8{8qg8(vk6DBe;WD z$XDMK7dv_C@-QE*=qR3{^#S#XJyJ9)`r=O#nlcfyG3mxPZf;l}f4jhLg`0Lxe;e6U z^y+4$2NFZi1xc2ju1w|;N>TmIe%cnIl#tB2rj*28qRvs$3L!|T5TEOIKLa?!XLAC+u-y5e8}j@# zMd!d}^4LP!`m|kpy8Kl%O?$56!MW5j;VAQA%p4Z&DSXD}+q$&{Ub%kqcJ-PWUArqT z?eM8+^gMcY_1c=xd@^^n*ep&lNDA&x{#7zhkFLhReIMJH#`07$8A-V2I1V{$-?@wm zgWa$E>UjJ`C?j)y)&!GM(}e{jhU^j+m=mO7yArAl*t8wDMR{|6Dihu7C_HPwky>wh z4$Z)$UR6;!(8S$8wp;^~vz)j7=?c)zT8zNv($+3@BBp)S+C%K2mE|&>V5^}6R_9m- zOCfPTP-#f~;Pkx4YCUarmOAq}xEI1G6tii&azgh*pjiocOclJnc4TYE6&n`ZeEr4! z!IwsDR8PJ4S?H5P@7mu%JMPkh<;K5j3#?L7xfiNTs;$mr>g3q26QF9xu0u_RSBEC+Mu zAvBA+@NB4{EH`4#TyNzK1N5KBmT@ zH!N@A_iC8?@5V`sxlq;=K=MR?fb-RfjJ?JX#6K-0g~>N_=iMWCXe?%@Vzy@HFya~> z)(WTdc{_bjjQqff_Sm#)yjUj5y?>n*z^C5PBfVm-??eN2 z(flI9E6E)0_)1SSWE(V)WSDJ&dbzJ@GpDQGJzPek>@G7QRzxuF-7C4FkiUba;2W_zb zl7l12c<2@PAtxtii7RvXZeGNhRu#fRE;~y8+n)2`ug9nRg~F^}NASYc#@Bx`_7Y_E zPi)W%mZ4vml?K-9S8Rfq5mte6b=bw}$RImjpvAr4qNNjj@&-^NxAWnvaq~`DpWW-> zF?0e+K@rn>X(CjqzvgBsgM)RM(c5e>LH&hUEPQM}1E@Sd#i7;a98hFlel(dS7#l0h z^lNUT{GcEOHk|Oq7oaJY-6t2b*DK$O@W%xyI=4yU9s}^IMO@KxSnhuXOOZ{AQmmt< z!2+O}#V0WQ=c~Z`U;92*j;Y;cCkL3R|J#6KCkAO-43jmbT0A4ZJ;L;9yW8Ft7d_*kn`F523sAH-kd4 zoYnYgs%}t&tyet}PwWLKi&Ox${sp`+dbxVa`86KlD$-wd##40UjOb_^|KLaWB(fiA zdrNyHiD9N4nn=D|=>9PSV}njy+?z_%)=cmRUBknH-XQ}F01fbZ zy(8KY?`HYvfcAvWaMz2Z@r$-qgPbcc5~Z65%-t9&FvE_Bd7Rr9h+@>!#7gxvxf%3E zyv2`7T9g9ATpN)$24H5=^UKmPiH(jO;m@BFN3DzKPzi~SvF*hUNx~~elt^2c9OUf( zWkLCxTxRC-28Mp-uMLy?7$!(o+!p}<=qnCOV}d76aB=v}zw;e&6HU$m_Rp8g+SqsBi>_miU7A^kg~)-{?7TU>lW zzPKPijn@DGsq6c~w?6+K@;`V2xHM|mKOM;lQebRPY(o%OxZ3riNeN9jJ5U&q-HTki zWAbRn>9RDDCm@6a1%c#^{!=8u#G3be=UR@rTV`g0lSx1A!o^(?v8N`t|E#$2(w6ON zeQaE&dkD|_Kxn18bA(At1KU2kO|F_4pn4+1gcF_;281yw-k~tnefldSJn=jdyVs*U zPm}ijkpjY|L;Si!{FVcZ_<)EkC;FY}Oq#F$SESuc6kowOBS7^6Z_cqZZ1JW#zWQ

2|>L70ls_=^@!|(pEV;=3{ zFE6=}Pl^(M;q;AMrveFMi@Z?y=F$~T0S;jk5rxGeI4u=LR8%_L2{Zh9h$zATa)ous zr)M^q{azkQ7FX$7bbcPdjR@*PeUT58NP2keh`g*#B_ozw`o-HJYNF%dwT#cXPgn|W z5|e?xk^+1nVfz<0+J#Vr2>)nWmSAQn5zD@(UT2GJMhYzw9%gD)hGyIUG<`y#S_NkG zKLr#?aey?eVl|Ra_SS!DaDyxDjKo+e$+u|ZwX*&l9Eir_MZxs-Qo%=ba3%WVZ3VJz zBs15q+HBdUUgzV%SEnD@NgXn2{cD zab(t4^4KLCK*M0aTt6eti5e+>s2UY1>_meEV4eu!BywYXuM-|EsF_H$y!!$S>!GLGpp(ye^_PuLc8& zxt$gUi!Mf>gI}K;;bp!*4K_x5QNB4QBnLF_!GBaLfCrQV<1f4Y(`qad$4;w^Zl3Gw zXyW$Za|F;Itd}Vo4u$&@u4A*ez>wLz(#)nAv%cC4bh3it2wpT2bt*l4gcICONVE;y zM{x*D+U;%lg;ni9-0WBRWqOcs^nd!j37-5-1X>d+CGr2o0Kh*rNc=^DXi$U3kW#9c z%$^Xe-gzUBhbzfasqei5WdX_<;8V_q!|Uc5z4}=FG~S7?G;V-?;C``R)6Q|oo!!a` zbiaUBFM%(lQZ}OfAveo=hR>EWHN{nruo-EW3d{ikm_t?*kp|*$r%DhJHe?C>aQ(@!uq*3TST4zKPbxevJXt z0SNEqnV^2!^E`YO^AfUr!0q99xlYWQcMpEE%sZi1=nbgL?Y~g8Y00IU5M;rx;BUrew0GEXD z5+IjvUmnS9)PrLgn3Eh-2biS#7hTzfIo=5eSi3CX1ur8G^@W|lpn3mt12E)iU#MP`8Er}(1WUPeCr%Vz;Y5(yy2%lLl}`hPTfk%Rv~IRZl6ecgg)mD~0A zQw3h8FPDMzZ-Hb9ifXgBf>O4@%_UjtW&8hROk<+^-t^!i9;1{T6=U7!M)4~M{^bT5 z0U)1RfAvJIrzqqqqh2zHPin61sCdJE^7iFt(5T6S^qC)T|6)q4CGFu84Dw5-lHZ}q zR*yXO-95>1nAzM&z@B#dby92z`b>B6|B+gjkC!H|3V}lDFGetk{@;)gmf&bx&?RE@ z&XXZhc=o2>Yvv;vw>#tBJ&xORY~-Xr0G#ZNXu=PzU{pI$OqypIjmsf5H#Zl2((^k; zp=t^qMn6O%pY-AGt#uNZ_~jz0$va*r`sn-72C_&DP|))H^kX3|Wa3)43$aI#;#jFT z@Vh8(S?a_EB|39!`{bI>(<($7%H999g*vM>biYh^%%XAS*i8P-Io=LjqUJK2+U&z> z|LiH&yYcS(!ua-%zNP-4YrT)TeMmaNXI_3iC*N*;3k{E?&yb}3Tv;kSxIOolJaArC zt7JHU5^8h`Iu{Z3m>1aLLjZM|y?T|dpM@qIYWP>W0zcfd`E-*2`yf()mgcOn!d{1O zX|8-F*yJz_hnAVt+XUF>X!3to!qSvK4PzsIM3DchouYbjGkUE{L@9m4H@hvpCrir| zTB+x^II-O}zBl5@wI#Ufozm&i&gJpW)uN>dqfcaixY^k9@b~6>zueyLoBweEl*io! zqfXBKp2HqohQJ#DMd8hOBP&>19Q!wm$0E^tUZf zMna32fp-D&GC{WpCY_){&La%ot7_E#ok9qDn_*@1yf8CST>pHcQCJ=pCvyULjBz2> z?cTUT*Lc_rx34NItm7+zmmkx?JBMum#cE_pa3lLF#wlAs25{I%X}qn9jaQ;)CYdTwmvcd)hb zfGl8L+!63Wicq>BB$`a>F7_&6hbbQ(wB39`x=P z6ex9zlqDzc2Qpj*j;C9iqN)670FW;g+KV4MrU}CQw{cX%^xO<1U6- z%5Tl3O-u==!=K(0{Mdd40%2p2`m%|}jcT4G%oVzq!($FVhdR;6E4Y2_Bs9*+N&^D-aEBuB!1oo~wC_)Jz?G1uTVEt<*PS zL{5H%2ml^}FL!X|TeKc>>TIiL!#{$u2r~E5(Wpo*S+eWjPdSJf6Y($#!-GV%Pk$3) z#kafIGZdvdVw|z+nN2t7Cfd;MkRl{zW)<-?{U3Yh_YN5XJNC%G>LT>k_K}xOU@g>* zb499l@?W}DFHX9K;*rp1j;ui29&7g~E%l?`B&?WlV2LGGl?1t0uCt#U@>S(?5swU8 zC(@|z>3S>HThYESuc4v1!s9ODmG%xCWcC#O9SibarZ0chNyeFwEz7}` zk4%%SyP9Fg%IvD(UFtHIb0g-k4X66)bK+3~XT^By`188YLfhF?QhfB0ougRYjauLu zbo0^+YvES=n6&du`c_KLE}YZD`cke94 z?HUuskiJK)*Jci^&MZHJk$t)Fj^bC#25`79o=UARCxBxG`2TRz@BzD{?8A^UDqfCt zgr|yqe=*~fkIz#w)W5d!4QCy6IL7ln%(J>Lg7+ArGN1ug-T?8VZ+N8>&psM*B+s2D;G%a?o>pz#FAl zwv(@0UP&D@dDnx5-M968;557oC*z%}$yWZ!{oh2brYXICtS@_jWy^4A@n(YnU)`YM zXANo9m_X}Si%(_^jbgm8eMXCwEHR7A)2l0Ao^xNJWy)o&e!MwioUhR2c4ZN8mSUeZ z`QgVFzWk{vbsO8`8HF`f|Kmd0(6ToP z!_5wy5h7I580jn`=I1S%bRf{6J${OWdA=HN84O)A2%wkeZ3Nf!a#`K==gN#-|M3Zk zO=S^d>jTc&=%;+SX=kO>&cO=8&316Qx4uLvrF-P*ZPLg*W0kw`iK1?+vPCS?g~Xyg zu8h?Eh;g9>Li**i5S7^85^+wXKq)7lec>ykM_=`O^30VAKZ>HrqmA%;jy{4{Bkjg$ zE3`<2+60ng@LlY9hXgetNph|`CSnyAaqdaG|HPaZHp4%IW&Pf60~*rnyPo0vA3ZfA z{j7oAH8bOVZ1rHQmu=sWKXlr}E|V}HY`=+HD7VH#bPH^G(CYKsO=Js`ipY>`*g2 ztoL;F*LuQl_V5t&f-#OZj^KYqt6u04Or^caZ{INhAN-#~2>Ogk0=w;*(SkxZMi2z5 zhe$n|TEtY;eD8>7s--}E7D{Rmon_aO`scx6V@EHV8X)-l*R`=>o!YqVQ^CFH zjmsj`0EKuY-v4IVbwI=qE8NlF`?+de z9Owj-%NlL8IO#c9p!EAOm+`G_t-MKiZ0XXhi@CrBh|45JeR+YikW5caJ55II-O+3b zFH>v~RE$9V4(E4`iJ2?UgP9pJrm-~`-lyloq)dO$xd5N=l=yCaYrdltL(y>(53qTKLT*@#V|8qGgC|8 zxp}&852!4r4>|G9|8c5>D}D_ulr_(3(S%;$P~11^w(xsc3?x7OiY>XEVuopP66UQP zWfn_Hf^TRMpu^rz?h|i6@}CRyzi^eT5AetTClr9I3GIfpK6eYP-82o~aZUSN0eu9@ zeLeI&A?2i={;#%Mz1xdNgc@z;zPn4zKi@1LAoyY@7`5A|QI+ZXwiBm$50@QAuH?&h3=)Y-dFi*KAG&oO9}XPycW} zNZ&(&beLOx?a>o5DgX7O9ZH~Kd$gm!qp4M+3BHbDOMyE0Fb4w-9pf1;zpuNn^L^-+ zs#EzS{lQyGbBLeJ1y{nkM`O8jb=I+o(homKkTppaL~zPg_|LYR$h>s<75#9gFD5^= zF048JAaR)<`~E|W4AlKOepT;oA4@6V^Ws>F?p5u2lfzhW-#y9ss6cY0&h!?e3@L;5 z1YuM!?<+P#=Jdr?CKweTYj{YlZhb?gOXHFB&g$E#(I{0$HP7kI==b zUM7m1f{rzbrP~N^_8GHTuMyVOlij7|0B1*xP?Z28Np~F_X~fT8fY>e6O3%{7Q~X;c zY%4S<(|-y{Z@Or1F=C`Ek_~!{iUlQS-S}HGopfO zN0;}wK2`R7l@QLO_RN)qwMg)QR#*7;MOzE>qh`S^;LCU*V&{u;#*ZxtRS4lwTG1dS zV?v4H5_9Y(a7ra$1o7)q+)Wm_cy>so%qihoK4AbiAF#zDOg{?+tAWv5e=k~4*L{_q zwu4y8JT^4a%wD-|PJvZiUWdg|{`EVinKKABX3m$THZkB~1tobTyb*oZSo4=YDt>4> z8{yqFd>ktVwRAWMs|hZ$aj0Jmt4+ED zBgK&c6d2~=u=Vh)Up`8(UYJ`8nixd?A#ERd_bGKpnyw|6hsL$9Xs>|6&6D+qbWvSH zjnkPBq(rd1#SuKd#cd3t;k_4}3uh7k8){;G#}k9_7SrT~%cPa-N4LXRN)%zL;xCDc zFhUANZLze-Ec&HMuKF-by2LW&!>}oy%FkCt0-r%$PE|rr4&rGTlqprG7nY}N2y=9+ zAIVdq(TIY_o%7>C+=gnI35{;_UGD&!jS{Dn@e_S5^L`0r21jMuW&IlCb zq~H-$b(QquNY~A9v8S0@3%)LTAb@p*$YnGg-6GeJGb~T$-;*p}SqF#FfRH>(nr{%I zgmUL^55t1$3x>mZ=L{y?j}HGAB)844dIM<~^yeOKpr`u0=g2yM53B<)Uoz&ONjjIC zu4T~P6Tox(2ON>At1c-P`N+gI#3*&J{r4Hnk33BJH16k3XJK7ROq=D}tmB{WseNa?4X8w{?(_ zj*XULm5I>tbUjLdmr6yHAAGB8mCDP1n_KkCtcHUL*s2_m)J|}t?zC&>l0l&3n*v3- z`ntCmI?Zz-_}bJtATXU1B>Z$~R!Q(Fn4fC*_t#B;yH}$E$Y(I=R$qXBx5ke~0hR@zkq1SdT?#L-t z*E^4z`kS0zBnp;<)2l$pSv>n~VIRLmNWse9XS<=42&S(@E3HQYB~3Q0 ztvNz3abI#GgS`<&2h=$#fEdAZu(jIW(@a!xs$g|m;>OvUmC1Xl{+A_WOl)l{`D%9n zB^|426;74R)4Fw{(XKc(I}E*9ydf~YjRXIQ`qUaHmR7^?F%{W~BeWu&oG=!!mhKkj zYlm&fDNg>psbzIeTzL34VY3dY2dB1evs3FaaSSVkt@S0Ze8--IJCNDW4s2IB{(S!? zi(*KxzNOh#pk1MY^434DU>RG%RP`{b4UGw?jEk(&vM5Q8{so+VGjStsbuDhfy!A z{j=|LIH*-4Vted@>Ri#Q& z9qu-luv{N+@TqSAr#H|3pV%79FpRkDHWTtqY+vV0DA(J!@z`XfM8pwjym4R_EGzwI z+ced_U{P0Lts2s2iV0<4_~)=pi7KG=j6a;}OaHq@()p70dyUm2%D+|fpe-%V&x!#q z%Q9HNp3eaW+LsHP)dr5=7J6`V@U-SlP(& z=hVg%$^K6huAx2gByS0?h%p~Lku%-B9Bm7)!-IquT5t0x->Vqklh|E4&sKr<(aT=QIB zStNG9nX@NOOHxxR+nnFW9Cvs=LX^*PrWvNK>Fd17dF74(?^S zv@Gc7#SAx26#M9fmBjr~UV;lTd*D{~f*iysg%-tod7%H(|g7_Iizh>eyF zxkke>h5Vk z3r^ks%|KEer1E8FiV6T31zQNur9$&hha_*=WdC}lWGmPpW&4jm$L&#$|9+qgknlE8 ztv7B64f(dkjh;Ycx|iudvMc>i`NzaQ)wMpThNcZAMbOym569?|J*J_;(0eUVPP(=ag&3CIM#wH zV4M_0&3$}s=`dEhJ>M(k8^6rwi?-X)aHM}$fl86P;L{bhT1$UOk zaZb;T@|jwbLH8ZE?O$9*7drhf-CKwxla%NMjr4<*s0>jm;(Sw(w>lRZ9ETx=gK6)JrzpbIbX7+` zi{QTTLkL+Z#-zvzf8+htjiYzW3DEU@ki&qR7TX?|P-V_B6-i$EaPyxzyBj#(;?gIn zsW>J`|M?VgTUHmE_PMx8%Si-=qh?c%$r{E zBoibSE-4dP3sL{;MMGyEai)5Ub-XTg_)7e*Q@>6xT9P=2TE3utJ4Xavg4hg?o;<$1 zIm!kiu#Eoah$#XYiM=3Cz6`JPxr))#@NXa1hWnnbvc0W6)nsD!%Yhqnnw9mV{W z2H`8g|BMcnSee8M?este8Ngfg8w{D38NXFjeOJv}p4JK#?Rc>L@5|-OfE}TqiJJfx@w}6w_cJa`G3+phh;?i|1 zl~>v5p_`BwEQY6%3IC=<_fK3I-(ezDx{VXy{K8ha`%U#?!Ss#}ev+5>bXZm^uu&!O zh_s=PSpiFs&THj+eDE|}4FCH?x;jf|Iqd&2EduV_zEnO01HGhP2M^uNiM|C1ltrE~ zbb5S5>X<7xGaNp|=iFz>sm z1=dF-65hTrR7po6G-~6cicR;r4K^b7EnwHpWnj{_h-( zIRV%qU8~D8@zmMza}wcdJP*Pm%V!Y0oBY{sVeRU2<6ehCwJTd9>Do!bEphUlhhj%% zf=)!8`!&eqw@!d}q9E)wc5@Rpi}#fbEE#$Tl#SjVNq9e{RxbUJ?M9^s87xeEu|+}z6`yEk9R=|olK zSsRb}3`OHU_BUn32${pt7z5ANbzLxg_2WBWCe7?B#{Lzy*aX9?ta^{tq+&eeFe0c> z!Sx%Y{B!7)_Fm{uRhu8LJg4I@E{Jsa4g!VAsw-Dp*d!||X-EIe{o8&2>nQvnJZ2ih zxl!lyTG;R*md}3hTEKLoB7A&XHntSw*XSSNRGQkAZ%B*PIR})BxzAYYnvKKq57*63 zSLgkq3*Xzk$FzZ{k^7aQ==P@g-)jcPpmEe$d7Je8C-3IrOxw<7xAlJU`i3+%B6a-) zM+fKuM1^wxu)t|Mq|pV~^4R~u=Tn%knU_e%LT9$ft6EQXhG6qJE5r$F=15JDQiV!h zU6l z_ujG+;gdLEOe}}x8BstcnVUcVh9~tWX$WIAU%sV&zPN^vES1d9PbQD6cue8v`U-f5 zF2%wF5hwJbij@n^->nKX4{H!x4`qIbARcI<+_S*#6 znWZdeC$Gz9?No|OLnGHBWs8^Y{Uc|PDyW7&TEUV=RMj2+&zmq@!7Et|Ks_M+Tb%vr z1_u5D1Uy3kkG3Zmf@pi6>h)gfQ8dBAt%lx&CBA`3LXTS$P&_r7tRwloSlE=wI%$I; zMw!m?2DJgR3FNI7D;R|mdaa#vk|V_LyxxnS9QutR?9!K%rQn{}#AhJaA8w1)$bjBe zMGHleCw6o6Js|J$ffsei0nD92t)~OUBi)-<`F^)n_k&Mrhf* zlCB8d91GbLqJFMlxpoM6f49NXnNeNSAXuUb^Hap{lavhSIX>K((!)apMZPf|-ToE4 z-`ZrtS|Q`?pbu!|Yf-;6jEMM=qaF+Ec&ttWQ;Tdg$roD=*?^`a$bDisK$7nzb_^LG zay?slC^=hm4v<{c5pOYf^`#P1RrRsQ7Eh6tER}$lCe9Z8(?ghl{u#8tNFPte>%Td=7v$F+l)bSa#lPhBh)_xA89fa zwXaU9!KtL4IxQhKk;}^HI;DH!^NdOQNO>9~`Eu@NNGQNsO!<9OkSrq!7Gm~y$*jmw zz1UAbD0}MS9n+`Q&YNQx^JIP)IGbTu4z!B!nUO-{GAosxXq^aEr@|kJq4c&Hb|wkg zma$5fcFAH&i0^!xd$2-!epmvI$a)Yc6JY(zCsKIY5e(0vfI@j1Wtf}AdRY!pO6 zH@X^B7suu~RM*@_=1Rez=f@C}2uj*j@C`T$^E~4nf0R&OM!q3O7e>MM7flI)@=RV% z{QLk!@=qyrFZh;EdVLm<>{4cu3LaieIB6Up2dZZ#yamtP`<#K_?n7}mnkX|y}CshLwg1>A5p=&<_pPHC#AdR~OuAV9h zEY8g`GL4VN8jEE;G-N<$cT2FP@+zKY6;;Y{Vx;gCk_&=!&(2Q_TiwOFTp5>I4vhEF z!+)&k{EGAKWy<49hU)v)LY87X3>R0>2o(4*0r=!)_+P_A3TnoZw59Z)SQ(E2coTHX>nI`&S#-3Z~_vf#b^&^7puatq|?Up z9P=>4l>DGnagSzc1MU7*=v6BpqwGnIH>1*tw%6MGMP}!#xI7nQ>1bzob@{8_bwZ_s z@bu3g!v&FPNWw#&R*Gg-W=h8KCwjH)V5Z~96-K8a4Mfn$<&suPw^ZltzUy)^Id~_z3Euw}I3EN7nP|Y1mw#(K}TJ}k@uJuMReE+$DIzih;G7wM}dnSbT{gHi2C-D z(mXUIFss0(j|0o(Z#0o0U4Ttl7iR#Vsx@yLY@nT(8PU+9LIH1o*>On4iMiaG=fK*j z$$^7KH|iC`S!t&@&|LYnP zT*=gV?~1$K3h5d@h27uv5-&d*b7^%jpKscxbvRkC9m%af0LR`y7eA#Ucz=Yrt7SUP z4NQzf{9vuNR{@>wpBZ_u!dM#gop*d;=V4x0X3I9u_x;i^!s&s_4F{LCF^`+KM5`fz z!=qf}k~sBr+F@kmIYOUIvq<|ZTLmO7oz4F;y`E6=oG#i0;(Cf_0Pdco4LPbXu9%dB zOvdCcS^Zwa*WTL6Y=ip_g?)7?{Pj3`*_-5)y zY{J)}?R<@m{IQe=J*~tOz+TjDCKS3hrj;Q(g2iOz(>t?V8%$#h{ciPpbjre*E0y?L zVX8MLR?#(skm0jM;%6>x$=6evRaylMIJu&~hwb@&(!5|%YJQH`N$T-X$}-71{XVZntZ0R5(#Ns**}sT#2lJhS$SXc$ag6Ck1@MSGLP;@ zBi#xa1=;lUs*n*PBG<9toN;{gcst7?R>#Q6CDPSxIktj^M$?GpP-Mc7@VVKgbiP-6 zHSZJ65cu+j+F390 z*d&)Dd+FY#9(VmZ^)@1AZ; z^@-oAW;ISp5t`aq=n}vFmN8n2?qR%6`ZUgbVulsuQkC;uIwg;(gAH|VDL-zI$Yq|I z=!wOiTfC(9N~WN=yh5L~W3s$STU@NGTsI(uu0p%IA4g|ffo?J9=@8zJElC@UmucwM zu1($V_$*{Impi7AW#5;pxL_*oB};%}wN#M=KNB$|+nXeC`rN&X>~pWE`3*96S08r9 z?*P|v2ZsuI)M;h%xHFiW4`u=e>ov*j@U z>;3sgKf$x3{rRr-?DKgy71+7o65UpvEp)K5Rl1-OB2JgaEcJ&)lLKsH{FU&E4jv<( zvU-|M8UUK^aq6@0KWO0bmyrcrF^YN6H(9T$!{;ZoR*MPQ9VL+7;+x4X;i-+eQ(fMN|Yg_o|iWXyXU;|~+>8jy4sXx+*C9UzX z3FU!8x~OH-5+`;092E(Nnj8WHc^CnxAYf^S^~?47Zh*xEH`8IN9t*;=(6Gd$7{?df z;I1(uz#&+JR?J5}YbW}g8NW@CdA8JXoKvPx;gWlRx1%y{c4{5L^49wHvbtLme|Vl? zenM_gU(OgV*!eAsM4K!FHDBgBAE|7c_7v`D(@j5Wv9*3JPMhcYTAx1qGWjEU4)x>v zPSMcW!oE0s-OTAMzJQAqhdu{s>EMqg5-4?I#_HtJS*(*P6-`N4XvJe+N{ob^GBjk{LoT>a`T%YpwdX=)qKuaq@m ziQw7>9liug*3a=LCQ4t6K0uK{8sW@=7y!r~u%=k1DVoRg9Ib=fw=>hR9P#nph)5$R zx27lWHQQub6!~Y09pYEll$m~{ROIg*={+8Y)hW2ALsqbuPWWirSA`==9XfQfEoeA& z$MRLf7_#t3RzB8K-z2x#cnb7HT4bEt^={K5t3*`)QLCOtc<=>DCJ#B|f-N!5h`^1531{b*PXJ8NGjtnE5J~(KfJAVSWC#u#0i3pER@BttCwDK?r zFob&AF3!(=Y(JIZCeUVf{UW2Y1~^|^#Rb+HQkQM(PNr=h4+~pYg8J*b+jPECCR@{$ z0g#)YTSVrZ3%To-XG@Hatba};rgpqb!8_hvP#M*k zEAkoYc?I7U{^nOyuAF*uVEn<*U#>#Q49UaX$p)7T<=nj0Rvt061K*Y} zc09f{m`QS`U*0S^r=Pejc-&W5iQoPip{XIbN5(AFtUA4M10UHRZ^cInXX~}h`!}sMzxt|?7-HOXuwtL0N<7C9&ppqNAA z1oC2OMr5h~RBuVEF-gI4*YU0R9XH#l6sIwtS;qX!Vt7d!FN#@ z!#49rbx(Eq5ozk>OJ&3SWeNSl2rPZEKF#>D5=g z?f`t#(XwTr;j#ncekT{MtNVv)Fik7Zlp%A+IF7yLppZl$TS0MTy2wsSGEloclj406 zKTE~vdqZxRkEjFm+1z7dGlb%w?Mit~%Mr)BKySOO;iB$)U0B;N>bGyanmc z^Ta4x6*892$6@BCvDEYOiEcad+@ZI<=DwSQ>}Q9t=r!h1L#e`(lN1E3$K4+7uW;u9 zF2WU+(V97)t=y|*KAnxc^2|eSc=(!u^xM){3vQIW#PI99$8e%Y4=RT9c#7vc>M0MI z)h)%25V}bHdk2smW`77xQL|IO+?5(q^X6vX+&#rN=00=&lV_CrCncO)`f?SX-QI8@ zW%#+WLf6@=UaMnC5h7eWw#>iA)F5Qp*OD(JcKU{+Y|bhH^sqX%+=6-g4J^P`M@d-1 zsmo8jj(T<;dMoI8Qr`}Bv95G2zwxXwTCCZYGcXRyr%xfL zR2=;QkMQ~}4`z?P-%z%pa}Z>}@6&5p@O67qxAa)rdZFCgf+JP%wUgSqV=s@-)<>-8 znV%Wp0a4ea|BtD+jEd{~*+2&??(Q8Z?(Q~Nad#_Lio0uDptyUH0>xn{6!+q8#ogWA zxu^f%d+)ky@o_k3@0{!>$&>8lJF?~RBMGme0Z+ByQ;YAB^DGHY^9PH#`7Z{iWe*ZW zC+8j_Yg@6%?HCg@4eMWpja9OSKnWv(PI=rJze7AXl)U!Q&n+T>fzJXN^LW=RJ6g+4 zbv17B?;!nTcpFWBrfX%)Cf())HHgiM2qt!ymnsoEueVq0+O4FZ>jR&cs@gIQ7s;7O zI=K-G|@jl`PD=)ZVY+MHw}BUP0SE)NOKwX>O!R%Q5n@L z*dhuhNae}(==w8Zpi8HFu&k>2sd`R^5`RVVq~?#&J#MLsibty-PyD=~6P zVJ)my6$U{K%@f?$OuydcN9sQ8=VoyC?dHs5si>&Wpzo%TvNS)r;3o!M7ySG?x;Tr? zoWz+sIR)OsvZ+o1P_a(bcBw_OE~H;rm9p|x$-zN3a3l3jCe>qP6mW9h2=AI7|1XC~EZauE@ zyxpY!2Zx0!Jc{N(DF@DDX|?VkqlOOm^)5t9o~Sd6KR}#c9l7=^&b81FXQP_O9`O3L zqa4q~N$Me+BYMn2YxbVENKj{*%9BQYu>TsH=4+UPx(%%Q@umG|nDh+I6L^GqE6$;< z?6ZuU&|~|JkS5R?P6QMO0^0+J$y{{t(KHOKz$hsD(U%p}BbNG8gmium-1x+A_I~s} z(HeX0Za1p=vETx=j;>{4r8@~Rr{bozGF{$Zr}{SSO#jKvRv7R0@CVxhKlS&m13`#8 z`9>M2DBGMX1?(dK9yqw=Nzev29#}6o) z?iz0i?IcnswNTe87T+8UT&Yq%5il<#;M(_1!s6r^a_w+6ic?AFWLtKZ{yu2wuxx51 z7iwJ~`xb%!rKC8Y`pIhmXT>`uLTgz84m4svFZQ|s^ui}}J1AUOO2N11v9hM+hNi^_ z1Rf?3&zCtZe-oTF`MakXy!!_tcOW!sdV!0l2xUQ3EDHcv#j>mqn=P|dm`&4JdbOB% zY>v~+wbf)71|g-t#;Gh}CPtJ{J&Kpp{HbL}b;3Mr<(HJ^%~EjXEWe+rq`OoJT771T z9&&7Mlb8)T%!!G?c=VGBebd`^=^1-PwvQ`f@9+CC8+drLT}Bg0l%SAQ&Xwq%u&mAb z+roE_@!}_=K)ttrO|D*P#h{*s;f36$A^Ly^L#WHf_hVofDOEp6FUgix1GJXmWb~aN zKxm{j0u7tXQ0Jq|8XUIj!4ulrN<~(J3JUNCa!C|>Hx5ho`ZMvGC0dWx*oQIetW24U zH#2ajsJT4BNg#_FQF+y6Ry!gPsqwu$dYO8&9Vy=BGjjX9J|%h8d&R``dC8xr5J>-0 z=qGj_Xd38OXP7kneo)Nmjz1M5)z*%U$lz05u9ztPBCkQ@WfzyXX3hJMYGh&(Wq`!GR+W07#0 zrzqGF0~MVmfcl*m7L0(TCiI+T}SBV z+_~NKXVc~lH1sHKX4$Knh(X+vVr68Je!rwj%~67oNHe9iepOlxYWgrW*89&W)`q#( zWRUk~$>h6kqOT`8V95_1AaU7`qlzT9``Z?wgVsXm>6?Ju$oy6Ws{dXGZ{n%CwOpgZ zWZzM3UHInh9-X$|Wf(3#{MAJ63L+Af_7qbAX54&u$%7Vx?XhyqC=4nwJNZdruGwz z!)unc0@bcP(z71og)Fzf)NzzQiAK6;7Z_saB+v%OWq5;PcJ;CioW=-k>ZMQv zajVyWBNuM!ibsyMq|+g8&C5Ia0SOFIgw~{C57eRTau0|jffKOOO1ZM9*$m%5jT$9$ z6!Q+aZXOg$yHBLIi9%rMI?l(W>O8|gt_u!CsRuLbqL@jcUmbtJg1`=}`ROskO#2G& z|60jN7e+9gQf0tJ1E1orsYu(v^`;B~<&9#A5#NCcx>yiazSni*?n}g$Te09cIRr0p z&z{or{aDhD#j;c5Lp{huIK%wTWB=dYZU`D zYbdV_j$8X}yPAYc1%hiR_Y~iN1W#(-=wtD1nLuO4m9@J5Ta8i7m24F@K&w> zs~TB#UDTF|p3frd6Dhnu4sbpg-pvkBgC5H!W}!!bC*r^drCWRt6^Hr@TnWgBNMMVT zIUatmZ8)*)EtqbkSzH?=P+chJV%NO2pSl#gW_pu7=$F6s^TEs(w=o*{FDoF~`M*{% zr$YLJ_#yh&&nE%9u1V&W~+RW+vB*A@`&hFF{xWE<~caKq!$|Z%)lWU8E zPMlZxe)MV5!Th-YND!p>T-OzR1*5C3=!q-;>tl_& z<)vhMdG!%V2rI!Fkr7W?-jHEni&4%^WeKpwKgR~iT$+kdjrqlXn{6`DJa%DzzbL(UmGl3ui| zR>Q9TfIsN5sw)Sra03e)S-Nv*Kv}@S!(RG{1~c0-C1yQP2^~l64uR7O?5f`@;Sb; zt;PgeF_>-$pM-R1K%inJ-JV`ZrSnt%7xe~}Qnh|r=loA^M_aWVc5AkGVArMGiHSuD zZz*XC4MlO?+&;0iG{MV7_z>#Eg>$>&r>5j+9Q3bY;6Z!T*gYFTd4^h82EYS}S2aIY zPcNTvl=3bvdTL!gr4Z1@7O^eYy6+D(sop3pJs(>qyf{hwoIUq#cvJc3ytT2O`!B0A zMFFv3w2;>%{Q9{@T0_A3tLl7N?z`NOd|6Mc@>7y!R=XZ9x@qX40R3@K6^0*RRjk$Y zzwSkBJ%<^?k2b)LqPNg5^Pr5}T$z71wtRn91JWlU(cx@87{X%j8`^xGVR%}(?1TPdr4h>{j!T33e`v9 z^-C~$Y%D#UJDFXkW)r}l0GcSzf8q15I}rJ_7IxboSOPfy*#vK^;ZI(2Y&KDvni1lT zp#3dIgF~Xbd=HrYJW&iE%XfO%3Gd+?SyJ^$C6Us}vpw^KUsSe990+&&oUuUp5rtHU z9V!Kcd}TGI$7U&Zdi|TvjhoK`jdhgm9#w_oOB3bDPEU!L*JwXgxL6GnCrXn<8cG&C z0|rRvZC@28@K0_JiAX)g5L9_&34U9w?CrUHd~{dkXLQM*X1r{sDAObs@Syo+ALCRN7T{i6-_BC3ZAbaH?(7JLM_eB3gXC9ZdX(u%&r~{<)9U0Ir zTN%StGO3^PpMsg+Wf>B2;iBe}}6%il(%ajR|sjtnPRFC&gdQDUwBb z$bsjn^3rhp?Av|(1veaB6(I>`kQ7m{BR*zIMH6$Rcmu;S4nC71rWJQZ?{%p#$KU7;435`s=tf2lgin{h}F&SSLg z+8I*iq-z=wwfdg^UZ0!1cG`b!rOqo-vdBM=4dZNwxU zoE|!$sZ5L$U<2%D3VGS^FKujPFSK|k_3BA{FzR%v#l;wOj((Omuh>N{1<2eGeTI=W zj86;6)y{YVY_u)Lw^zH%k3scCwb&pY3r8JNTlEp%5vv#39doFf4Lm0>F34aX@NfQS zYrfEQT#5-%in(p|RKeFA5?QGpiuWL861m0t?_fG+OxRLeWY6;nKXg$qyV&!EsH>d| z_JBID8jgszA_~1Y6$`eJz@I%CQy&2;&iF|K9yFC^#)%JFsC6nLNbjYSh7WHd!ELIJ z4~5s#qjA*-tPyTyit&A6B4ld(PlhM#w)im=E#N^+V;vO&?-m;kblx;=tn0C`>M}T- z3SpNrN`8WMP!6Sk0Sq0SRb&ra>wWeeA_$AxP^B0Qnj;ys!+e~s!3seNWo+E#`dEzj z8Ycia9x6CO)x#R%RYWL>fd@7~PR0Q>B3pC| z-Iy)q6T0L%ICPfQlah$3pY zN181&;0-bDJ4a8lMW8bb|t&A*}7W>h}e00gZLvuesG}C;(q9 zp=|D@-Xy=1plTp8CxwZSE`~Z-{Kq9_rtiGa;};!b+<$=Vg>fUYHf6|~{GZlx2Ey~d zRH3d-wHzHoX3pG9Q2vwCJEVW1h0cec8il5p7&7;j|DS(G_R8FvQt`+iTM0v}{;6Tp z*yGtzmG%99XMiB@?QZ)`2cty6S|fh3zgR5J8|p7aYYnk+da5p&@zpf5yq);Ua=kn6 zH1ZYMgNNPmXUUn9{V~Rgx#qUunlm~D@)y~FkLdCw`G3--f)&2I-CB(iejecxs2}^1 zsdEXhI%OM0GQS$wmQ@GBR&VZ8&brUqH37bxV@ri{JcIZjEFLWv=uNMdQpNr+Ljyno zg*?#FT26CwfO91ULUI*>W`LYR(idI*>yO{DIN9d1`82cb zo=Nk1{I7rkY zEok>>9|9xN8wH8(OXCZUZC zyJO>gfEoQ7isa(!HQWS}_5Z(U#~Ro{#!UA?sg5D9&E zw5wy@ou~A`qyQ><^mV%I?^OS`n&^t;6Uy{I<#Q6uayQnYMDt-m!k7`xTkBWD3O@}_ zNzO$%vz}7T!iZ+zmZaZxn*^;c3AJl^@7u=c;H`9a zA7-w4^GaQ-AeSsBinQkVK1es#(Kn9(Gvj%f&)!aYtoo+*?%~r~C@+d||BIyFNQonW z6bpZ9U3S9pew&;PJ*2fSV~9d^NX*9Kn2L2Uc{dRkCk{{;oA~KEmvuR)0+Lt$D*w~6 zehA*$9o^)~bR}HnJKHSZGuy71vvR~H)InkFR&R%G^@c+Z;un0{YEzl>q z(1qb&Q;A;~zrS|;9BB@Y^ZraNqj{W1*^6Hxhe>Q0?L-dBxE}whFp2|K@*$B;WD&#S zDe|VF_!s6H1s~(7#(4dO!HR_<;dzM)U>TjA!3B7|O$-k1Bx#=G5#p z=sb@MXyC|bt8DWhtoziqNEtvyAl-0=piz3`OGFJCYph3KQ}X9{T%8(-FAJsIg`CRw*AOF@i%nxpn7E9 zVy4_f{1m&M9W4*n!SIh4TwzA3hq^Kag~Pn+|wiZ@4 z#S}z$He9CR0O8c9&ht(R8{Pzwr+ZPmXEF6JZ^HcfL&uuq>xFk?olwFxGL5RjnuR%R z76ap8Ij?EZAq07VPOIUHcYDT-$eM>8FiGit>tiMX&rf1lToF}E#n&Da+2P$I;>!V6 zUpos>NA_!48Gis+rrFECg^muNK&eQdeLbYeDf&t5xiwe4{ml9ruaIOz^n6Tmbz!qTMRqFCL}1sFsMCXwgss*5zh`8HBJ4-a&aJbM^BO7QAYphht`)$-fO z>1*R2e=THbI#G}8tc!KdxhuN;;d`PqMu7GW8{7n+YOGcJm_~ZhWCGMJb^{{$NbTle znR_R^Z%XsG5@ZrX`yK{`q@f7fSGI%?riigw=gzI7?F7fl3u=wg3*wqWu50UN@a z7*KX~OLv>|ifEQd(!^<~W4p zL^32h_lx0Tj9u_Xjvj!7MXb|p*6w9gFDDH6@}eep(A^|yI_y!V=4?k=LDs(|5cm%P z=;`5#k}CYha5P7^lX}|sZnygv4NhJqvezz+#6J$AoLuBS?V)2z?FO33H~cyMB7!Jf zB9wNRBRZZaTyb^%4;%juxWfEcOB#a0g*8V=VnZF+H3a2HeX(%d0OKX|MmFX&aUm`m z=~vUj*FtQ)W;5A2yzFu26ql;7RnGC)mz|1dtHOgLt(fc7 z>4X{7%j;I64Sa~Z?JGzxBW^p)F_bA5?IrUFJDSz9qexA3~*ft7*i-)Mbcxxh(Xu`tugh0 z@#p15phM8d3@a=N2RaMhk8acVHwC5nRLj4aIx5>rOEaL&F;0CZJWn%&Wf=+3SUKir zc?&9;{?qH0=eChfx(bQM}Kho3*(bERe2qpse*mJfvQ{z0{>31uBv2Z&eN zoNVH4k2>~lqEyAy8rmeASt=ZKtx>4H`eo>jr-JmmL`p0O0T#em;unHCW>jehwai!S zVG>@N=BoM2-YbRn(bpS)_{Yf@gvj^&8e-;#6R;=v z>`jyr4CI`o@g?IQph`>+F~wV+>kD3-6>=d|NWaxt&%E02%>F9WHSx$@2O=R;NTGYD z9fHOSLsAbpf-dtFNf;S7M{MY6gkt?)h(UPson{KajOzVgfGRmAln2X%#Q|$-g4$*% zeyR90?C74N)v;L#W7TQpVf4uuc7LZ7@oCFIg;LC^7q|R1|3JR+liHKymm!M?S}lCK ze4<0efrxs&F812{x~8vK5L)^ul_OreS#tx+vTyeiaRxYVK+)mjC@ALrOMorxa~B!A zd^a=QP-g5GFfJ=ob{g-2mysP!(E~u}&kB)57-b9B-wJKs|J|91n z^IWAc`P+t;qnu-#hsD`a+SyZVA-Cs8Aws@zV@&YhV!E8bTouM;;h;Zww6*<%7V^!k zzr$cbBpW;B%;wa=ar-=mtYRy!J4Rv<%{c3R(Az&ejjb^+SmJqsJYd{u1`ZIA!dVjY zI!<*Iz3*qjcg(wR7|Bt;qGEoOeLY*ZVd2JI)E@G8C?bl|`|VscWsRKfPv>&)NGVD? z%bR-;p_wca`U9ak0+V>m2L zgJ~^DX`M9rM3S4c6`zu<<=$ff@sfZ`!{ffJXVcqEIEm{hvEWDRSvGL3kOsZ5*Tzr7 zMIVu$16l~u+!lOmqY+Zl(LJz!8Z^Pf64WWzYvH&c#{%a~eSQDg?z{K&_Mvp`S~e(& zLv{oleyI1@I8G7=28P`s5mG>FVBVynXS4qcgIrC1d}E}cbGo8uQJ|4h3}ZSrO&`n# zE}Nwqm`iKa1Y_lKtx$N=WsyjFOhjy0yth}(IJHIu{i+s7j~+b2$n)X12mr#2zvJ1X z+%#`Sqr!L%>-`z7%aO1CpBFvgC1O?kLM5wwf9DIT(L}P|48OMs6=Ww2Sipd;f>)nEu}V3{!PBY^=jKs0M>m z|D|f|sx8^q^VWo8I;RV=zJEIT_WuQD=a^KINyHmGO8WScVML?sA{g^!6>t3B)_^bXGX3Xinuu%z6RQ6bk!arsXHY zo8(upRNB=yne}3ISSo-exL%o)k@)zlr0*wQ@7|3FR9z7J+}i`27W3s_3#7G~ox(D*a~c1IIe1{QF<( zBUMWcAhgl|CjKAWA{}G}DNitB)v+1#d*+2F2q)!omJ1%%!JlQBt1~n}w@l61+!FE` zaLbK`0dhliyU_*&o%eqAXsVL&D$pq($B)7_e)8jPQtHM^4`aI0D1_&A^GY1nn-+c!9J-IB)ASJr%n+ z{`^lqN+na5obz8$zq3J%c?Dy3)d27MT$c$HRtplq-dV?#H#4@)v7jI_>d&z8n4gAM zwcp>4W%^6p!ZV`ayu`ZAgfm1$ zY!xopx{x1pyd%RbD~z$l7`mCELdMU@?T{QiRKqbk{0ln2#tNqS=vUmP}BM4CH!lu*YqWY)y2G$n4gvom6ZA7Z(4vijjt-ABn_Cm z8O%c;Neq9557c7$#I|+$h}m*L)LBF>3XMi%MF9NTsLVGLYlxt7*e%4TFLB>qPxkUx z`862~@~o7n=w2T}NJ~Y04$k&gpHD{H{1c}$Z<38DUWRiVjjLIWj)TPRo^)2dPJZo1 zNNZQQ-K~tZoNBZ`2TU9S$_wOAzR9SFaL-Urs7l_Xj_CA@l0K^5^#1Mjg1YAqSHNGS{I15T_!jfXkhHK5!PgJHnze9)G`2q+Yt#u=9`g3( z3q^DHOFv2CI9(;nAf?-9!%;-(HlAXT!ShpM6yRO1plVaoHOLGI41B={c!+^TZEMdG zW2u^TezDEV;?lizi+m~F>?kYruG4Ep-dtAW5Mp8}zag?|_jiJ`mhW}~&Gv)G@+ZpM zZt8kp;5skG(rMPczOFLX+OD>nt}66N%Zla=j9)}&i6TDj8K^v+9md9D?uIBwqRrYF zTuh(78rQ;M90{CTy}nTT#$qC65r8rXXP@U&#p>Cu{0D9_~1#GvT8 z26X28dOd5kQPbsb9&(=WiD~S5lnoP^%wS4Tg9a@^U|V@VLq~mo;`J z&C6+6LQ&M>wDv+w~8YHtE(QC2@xEQ}N*rN99cmIm4Df zyiq+`>OP0jImL2}wUr_we_~UHpNaSc1F$pEM!S6n-RHKP?SMfq%i27)> zl?qyfv5L7BZThtVrrZv#lxF%?G)KW6@pRIsV%Ii%xvZr`m)=m9&Y>W>pH!R^B;a?t zn!VUqSTICyXP80(IlmNu63lMw>c#ng#_mcZVRGsWnS--H_~ z4*G%xcqz#R>HHSFet(fC`(YE9_u8OyK)7<&%N@pR+R}D#eRGYGbxd5%vX0IeFZF4E za6gAXgQb4C^|-FN9*}2q3h(Q{>&sD1PRcOb zWXDNlgqQ2uM3>t?Gl~}e2Gh@7FC;jH5d681DnXl+v5Ty~X=sherX9<<#f-h~OLP!1 zBkieSf-IUFZwi%^A`i>R&m!BQSHdUWUz{USzPM9v&OU2Tw@*=v1+4p;Jm25_a)N%5 zwl!$KKBJ7Rqk2bV(T+?$w$Juh@aFtH%uW(s@lXw5HwLeD3=&*rxviA?TfP*#PH!EM za7}yv(Txht1lww&lUui$;iP=n`B_gcAmp)C?$nrs4CGUtITg9i{vjQvj(|oZwdiSX z=u@Z&BZAf12V*nKBPY)){v%J05RJE0(9u!J0mnd;Em?(vFT$`*!4}yU9i+ub zj+U9KP8O45IFKFi*wA7@i;>UOvN;Dai;dHDSNP3GEtecH_)-xNN1um?EUiUl(o-Iu z)S6(^hPzmL#0wl#SwQl2>zenM^o?ow8k18#j%J6UjfwU1r7)-668{3H7)MALTavM- z)u4kQO3B6jDqv#YYw#aLY$av5O$uW+05j=nfq z^eCk`oRjL!`?SN}MrOO)EIR7e0|eCHKx0TDYMx=C<(p6J1C=QRJb0#@s7u)3cjU@% z6CahZ!3@p&pOl!j2ovoTR@;MzaUGXGYFQBOc-U%7sujv;NlLSd^t%Xz_#cK1^X$-U zw--L1+M!H79#NTVpQc|p3=w4C_Tx{2+Vv~GCs<3;(s38)=icnGP- zARf!xkZc_+!!I&Ojd$eR65?Hw%VSLd$` zpz-4LL4(42k*{Qz~ zn2=!R80q$3KKFJe2I7PwZ4)}JYkJE;D1 z#+lIo)>q)}V*E4VBct@XCU{+OuTI5%q?-nt(}bLi_)+dwuCb0RzL0pQkPWIQry(9Z z>|Att-@fcAE-_R1Z8k$Lx2~`kpEZp()sP_aFdryijs*606qtLuUP)0i)OjA8&h7AQ zLFm7atKMvGzG<@Ts|sb=7EAlx_R9yMM+|sgYsiMlQNZ61eg@^EN)h;0K$~YkM0~Xc zU3jmi;d;LIUONNd^9ZG|h%<-K>!IuYYIZpO=Xozti-WY}1Lq zg$JAUOTTL?ezz^a#J=wBr7xR9KLGc43Yk~4jRKX-YvGIo0$vqVP6lG}Lja<&LV;BD zV*h)Yh@cnr>?pQbRwYGBh18L!F7SuQq9KpY1(TRZs0T7#n@-|*0Lx+1hKuacv&g;4 z%U1WC_4+J$;-jIfidE@KW_VfC6Z0$u^X~QnCz^gI_B{i3l5J!VvR10^AkRei!Fvd_ z;=?sJq`&;Eqn0V#m!0o!IRXJx;bURYU}Loj$q$i_VM&U}yWn$}PiY#I=DLU%py~+Z z#i!!nHadS{zdiQByonBYrKA`_M>EzD6#|&BqBi%Z;Wmyj{KGq@H%x@~Id{7q zuFqou&9ek2IbAQI|F~9ul;ui#V-8lIg>_M@XP!8Fs85IOs*?;Ra-WzmS~jZaxyr3W zef#2dcM8x-_7m7JP+^+k&r9OH$1hLiMFO>Rw-jqP(wcpxKG$QU~?k%c>K(&SGH z%M-60A(hZCH1F0Ps z%|u1r?@FJ2Fq%?K;!?vs?etTpuS?t2c=9*DipCNsG=5zc8cI2fYlnthhLFOPj=s9( z1pI!xGTlYSi#ZMBrY-bg^|Don@~8r7B^(R9Okff?6k&r%^s(6Xo-+h>cDQ2B(kO@S z4`xa7?q@r>(-xx?TQ7DU(KOub%Ir1u7ZnKXr*T&FHlBkLb|@Gz=s6-@ZPXLeX*in> z3;GTzd2{|U459X?E}wJgA)xd?05v_2)&UqK^7S!W?4h(TWVXF3V*O81ne-!t7k;Ix zN1v2RWcJ4WsA%R8rJ(t37%kZ@l-E!lW=2QoZPfL;8X;pRu4%eO@2*-%*}U8D+;37U z2GuALr4M9r#`q3vp?hADMpe`!0qZp8!trt(tWAwWa3_SE#NgkJzoR+UMa-dxd!bHK zQh-sJ8ucJ2B?IyA>(Z@~NKd47#!KUwhf~-hImyPxrQ@u{N^_?i9q8uo1CHiqKDB5p z=Nheo3*JeiV0O<0Fhh8Plj99=l1tNUhIfPFUq0vesVr`AF~A*6MPoG*N8Q&r#wBPF0+_Cma9d_?|vv&&C(?IBAnGDp|hK zxok{AVXp?Xw)1?01|*8XU8PCNIU-B`J}PbO2mD&Qa{M}-xn91PgDIf}q1KFTiR?4T z$weLIveG$>FksltA2J8Q7)2@U@GuW-y_$7@YuUPvs%yMh1e1gtmpX&zRO2mt;m8X?~Di2bHB zqQ|-ZG!@}L_yy2{3HIPw|KuUd*B&oR%c-yLP-YBWs?5$KR65^e^-wWW`RIer{BB$K zI25c!&U(L&;VCqhsBC*D%X|NvtlwR8Evr;I4Cfc0zAH4v#mK@LdJQG(jE>toi-bF@ zBq9#ny8m5t%g=tTnTF`l!xZCHgwc$-8mL1mK0Q9j*tsW=t`tFSB~k`2BbCPhD47ef z;g!+R{Mz$nKQai3(X}~Q}(w)$Kz+?3PL93GX2U-j2Lyyb)Qwa6eyB>`~5yLPV={zb|^B6)RL=R~uB(O=?ksJqxZRGr@pQ7q}%GlF%99(okS z$jJ6^TeGDYTc7HL7JuX~z<9V!9jfHg5x{z9Y)dhU{$;Q|UCZ~{`u2J@H$g3HU%B_? zu>Hgqsp)Z9((`%Y@#NtNU3Ba_DGSBCul$}m9hsQdN4st|TMq#?TxEXVIhyuRI?NpM z1H#dT-NuveLYR+aYl3gp43I&L(qjcpsKB9s?eRG*I}GO8|3M9FCLdoutJqHlTw>{~ z;gsJt{QQVt)flw<#aseLxVTJ{pDCff1a!TDHaV8JMp>zR@n zFLL20j`jdxR?1mH))=`+MRJfS>dpB|sX2 zl0jL`Nr_dbjZZrCrPHfX9S4J77*poa&z4N-x<%BA7FgpBA!CN(&Q}1*X?P7~*3Sb# zM9wO(<>fa(Gu`OO$s&wwd=dLP9_*OMV-orLhYG$jhlpe^Hj!q>+fhkvJ?0Ojeoc+&#BfXND>#;UcotUc_M65Vva=2oiRUV(c<`Wm9+7|Ee0y z^S6JTF{6WYmmFK#Yk$8Ctv&)>WDK>n)V!IoQyQ`rrhM~E%$5m3NB-M$H701_Ef-0Q zD0N;S3knC{>kjQ{Sn@jY>mn{Syts+vcHk)k4k&1}pR7FZOwovn&)DxKe3tTl;<9xa zZrg8~>Vy*%yr~8?^}QmXHZfh1~=ASAt)kY2Z4efhToS zk6+U>vuwYk7xwU+QtyEIINGxY4bX8MaJQt5Ou z4&BZ6sSwgpt34`ZnYC-oqrNx^tM|L<$q~I;`Wzsu(NTAuqQtk_;xETr9@TQAVgfX} z>uU7m{YbBU+4_3v$xG^bi@ECQargaoRWkPZOnRuaA?C2Y7-fBFKlf{;2hfl7j4t4D z6iTDf&A9rwV-Ae^R=M1i?#nhyp6i&8KY6zD5DVPfG-@d zFHQO#s*gw;*!-5Dmuiyp^C zu8@Iz--_S$K&!_iOHNyr=~)3?!2Q#2!)U|bVAu8L6=HHVofW6k4a;L>ON&#u)w^~v zFaFDO-8L8phY|n0P#f5r=W7mw9kNw#in0}YxG1BN+8Y8euLWS%i z<@dw>TKV6Al3DksO>|ufLX0+UTnKV|eKdi3<6u_ATBkI8m*;FssX2Z4rhjLZ8Gm$y zlRYD(TkQ-el*Yf@UXkRRK-lZNlSS?`;5C|^D?UWB;azT_e|XB(kPSWBTT4HoMsQs| z7!5AW7&1tB%n|W;I-BH4FGSV99KPTw@e;(I)O^oooX&fjc|yU&;%@GKSb3~p$8Twa zCQN_wys()?xs=p))NXbBFcvL*_iSZ@62Q7Pcd`kT>HWRg9$-q<6545gJ-b|L(3RmC zyN;pYf9|HP40xz#^L#Q7y$Zb<4)?c`BAux(fT*>HncefWycQoL_cEOO*y*1TYFmEC z0VtZ9K*PNnv?_D&{4tn#?i#4+BL(3K=1f~m2u@z~4zxIrC30*V^F&yDwF9^ULu8l^ zSHC*os7mOpt34qHBJ@`pEgS2$aRI_P%PPQ6oU0 zjYmU+yQeN#Lk!6L*`mORo7?W(U>aXf`-|a-94no!PA;gOx{CV?88}m~KBj3rR=4Vk zu&61N+S)bcrc9jC)pUY%AxtHT5_J#DtTe;b2o3b!wt)6MaGQ2N|%wCFR_lwj@Z zm8}6gS&@we`6#LC&|agO0pGI~AB5=9#x43mI$S^&_I$^~*(+XL12(;b) z@A`95_TBkV?a!2Kq_oOb*G+U!V{8?fCro2^p0T?1m7>geu569x9+@{jcqFOSJvz$R zPayH#yuxNN=jZXYyK^`ZV|3pWBPtQESjbXI7Jk+SBlR&F6pLz#_qJB`JAk|?izba8zV|@=e*QYo{1GU zvOEd}UX&fyv-+j}6&JHmH$=r}*u zR=oZiusfiWy~5|`FkB+KNDx6v&Xgf~A5!u)2U!Lw)nP}0Pem=#nxeU+V1LB-Gy|v| z2#!Ora%YsNH-Qw+`fgH=Rc!fSocNlz2lQmsvqt8M4C93~gK+a#vNs!IzGk`xIGbQ` z)UKbz+BcBO`!`eEJ>0fWcs^FHHbF&8VS{#)$AoXbv~EfoHE|l{%5qvdH2uD}yS*z} zrmn4DIB$f;^jv;2cq4A>`-~WHys-ZH{HvqeB7UD3iIh~Y+TZC|zw;aYrk8W6t!&KG zhTSfq!c!kB?b(gO)#t=mpjv)dGUafGsneEO?)axG5<~yb%l7@jPV{|bWR%6rUY2!3 zDK3;%`RuXJC8&O#vN!d!{3dIaOGi!gFXu~xcj;f7jhA>D_HLT%-HAX!p}9SflV8bf zz8~@q@mcEgq|vyZplr&s&Y-bfn5n}VJA2o!zD?e)s*ZCL*`Vl6;RX$5?7+xg;BSwcM&rU%9lJ z%RI`htt%gADTN;|xE^IjqaU}#xy)o1Cm&+{Zc>yCQWT8Q^x`OloiA2QgasCLI2^$r zev9^$PS^lMCJDEAvaZ8~XC-a2-Be-sV3| zwJWW8okKGmI}Ipr9S`M$=@pL38`)pZ$!xE#{>)~t3N#(f2x(h2YL&2C6w~1jq>N8Y ztj8y$vNPpVhhVd5 zJq{;-@kXO&53ZzbZ+qKIbFr3pbDms1kPYAins<-AZZ9pPsl(kqj1q70>6fGCr&P!NtBp9kyVC+h=45^whpV-qW09u|Qr(4~fAb2WYEZzldvpL34U zP5d86bl677S6t10G@>^ppzv>RR$d!aS=y3Nn zo@iWsgU?m&3H`Kk*@->zx6W<4Cn3O^GPS}{%>ODgFedmmf~Kn?@*SclvR=nKb0Vp= z>SSA`!hJ;;5T-DE=H03O%K-Kw)q41^p-a&DQ{e2ySko_JE*jgS$tg_m)-o1|)lhv1 z%UtoAmO?!9PE1e(*ANbbyW2&wkXAAf?c*F;dQ?p-kH^ux5*{dAWQLCzx_ogqYBf6R zcb@bBbziR=wLBi;HMBkUdnpKf&aEikFPJr~b+*?>NF|MzqQd8N!w_D;5x5 z&28AOCN=VV7MX=YBTJD~`F(y(R@uAvrnS`qU4tVEouU^l*F}(hG;h7RHB!A^{dFi; z|0WWaOq+*Zlb+5kwe?pf4EyJJt&)zBi~8F9x=B+~OUAbqg0@;q9s`#iqZ*g^YylaV zKslsme=d|}y7z3jsQ9?^H!sCmmpMJ1)v-3x0o7ELWW=z&4f=m&Bs7W4vL2MP65h?8 z#yna^W^IvOL_Tw{HouE%ZKac2dR`RNQr5UdjU1Lq71)psti6q|B*!ySeglg79mww% zJJx6MBk<~R^{&TU%*WY{U#H40r${_BV$eMNjHlkd;q@e*m5!eFcMRgrOagGpZNS8t2T5#@zYoOP7ha#RkuAT zx{_&%Z-3vqtFBv{n5yUv0Ni!oHOp4D{mb{BK6Ut9X}oIT8^$r34zJwUb@KiQMg%w>L36^!ci`IwYl6fG&^FM?TLI@#*5F#YN^FMtjb3M?Hv{7d&djC(o z9@3ahDgusZ&=>Gb+I{a`l4*|!A(zX6k&)~F^`Z0scuxhMjb`flNYx=-HdST9YBiGLi&$>nmhcqyVPmLnYe0Dy-I>Z7Bh^rcTo?Fxm$<$qES zJX)YTqee$ZHAWF8A#>WL_01&jp`Hh}Eu3GM)NX8i+$qn8?BN0RPW)4;R8F2eX&92b zjW55lQ)C*t72%JznNs@t`Y8S(q;~nS@rWWhQl%ow<-SJ%$E}_wkzBI=zCM-WA42A~ z(eYeF)c_#E)pp~24mX`E5v~J3n-&WB)Rvd+>M($ExoVasa_I&K2J#B^L}$%A@sFY? zjg5`*xMbmr%ddAMKfb%rcQ|jC1bRqngs-}}zN4dq;vYh4*RJZ#CwgZ4D#$czZ|~5k z>IETlTW)xy`Ot9ySQ?6bvT@ODUk;fbEiDZZBfy(bg6>`MPbPD@^V?pE)mJqlW8JS0 zm&XLAXhU_{mOENCjn-TeBA_)Ey}Pr0wy%OrGhNdZ+JH;Q+?EZ8CT_nCn%SO|DQiaN zAk)xPmDaz_8p?l72qA{>h4d9twTve%}Lp)LGjNl3h+zsdamcmQr-USg5JCtcgalTA;~&c^S{0e(^tr+i0A+n3gb+dqA!NY`aQp)Z zvJ2N~@`n&Y2qA=!g($%J58CQiUA4h*KzIsB$GA@jG(0%=$OA77_Ku34HdzGrrLdw#FZ#m{fJSr(7P6 zEelK5tq#~1XvhIzgPUs9WOfP!&ZQ)^k}iX$ zDvZ)g1XO_IA8v*8LdHYken0m@KJWB_bEDt>@*`t|InmQblA*4p%|7wZ;TN6JUg3I6 zT!nI;`NThapE>vF*AH9y>i5JdQXQsG{4>~7c=#&^^Zl0SX~P+qHpP*M0GL!mVXgr{mKh;? zL3GeBC6gsb$zd;r%f~;C?0uU|ivY(z=Z3mpKlp6f85eCB*IPVTUhS>!IBqd7Jm~=d zrF^+!`2``bzW&U~qdlH0HUXsRdL_Pm6Ktqj5ab1QXemz7IP#=^hr>8p4{F-4`8e!bK3!cH!I~g#|P!wx>Wy~ zka=d}F0}AI+L!Vh+p>*q$0=8YN1rE9_%pi#9RJic)vw!frH8`rJnrf~aeg$M+;Qdo z=!x%)LN}Xg_IZ5#ASx%@z!Mu}aFzeRy?g6zpF7lMzSupC`yzl zk;}c!^u<9MM;vN4wdu9a2LT4e+2x9`R&(~8ITLp`RRhZ7)k`biG!5-Q#Sc}Fk))JC zFF5u)x~DZ=L!IQBesXs1iA%btjgzQ4QB@3RZtmV$ZHQR=juIZL=AEW=M#T&=?cWqB zVgt@Z)d3l{vpClwgeWZKMi!hB08AM4XXwHD3U^<+Zx2(3(V;43 zlqAW1Yp2uIUjO4dg(!MTF~IPP2E@8KR+(5l$SVc}q2FlUPO+(x5$f>3*@gJtFpkCp)Xrhqo%zuk$pbQ>UWu%_Z?0QyE& zyL%g}|M{!_!WZzoBEXydUbS*|ZKiIT`teT`+sdF+-1huk-LnE3rf8~e)1@iT8MS+@ z=u}0(^4?NOWKdhGCc zruFb>J?#6aH^)pw1jH-(F`HBTL(ZZM$3J&&Z+!UsEvEnIB2I!g;KhEEST8?v<7E*$4s1pI)73l>8Iinc0e+=*3HXx&ct`n4OKruLRE`KtSkR~G>Krb{k<>mnrDD>8+MC+Ai9lh-49jr*V_Jr3ins^<4g?LE zbU^k`t}q*nQOHlBT?EHdO`}cj)V*bFYfsRn8{|w?jnsPk{f%Hov#CDY_a6k@UK~#`ezMo#w%Qt9LxD zh=0cGwItN8T9i_blvFqCUMq^b`sp zZ}1bNAViOuKdml2^lKvIA<1z3BO>&h&|Q$61EspS4f_`R~6Nqrtp9O{tmy5rPQe@IzSTzj_m1?>Cp{ zCg#U^iiVJuU`9?AStX#oMJWJC5CT+<3Z@IsJXgHPd)oDH(YUJaYy5oi`){q>dXJmd z;0i-TT9{=OCY9Qyaq^OR+5gb+dq87AZ@XF`NzM26!ZTZ~vDF-nl1!CV!%r3ybp0#O%^ z*JN)43SnI_0XHltjN?&5yL2gEK~t6Xfcn^C*wJ720 zA@bqyZkqb7cH>m*{`a=U%|DCd21G&McVP@H565P4W|#rCz& zzk{FsgM1-3^7`n7;anIY|0fxae-P2BV{sAf5+EPk$iyr{u7XDudH9hsol?Q)e`2Iq zxy~3^uIeJnQ^w9ssk=tScCk=ZUGUdmF`j;br`V{+P)2J~$Vk6R4FIB^6p2)K)%@27 z$&GhK=u4Rl$l&3gJlKZjnz(p{*XC(EQ9=kI-%@Z%@dhCd2WM zF$&%sK<|mf`B#Mt!mG9R2cZW)f>>R1m?i zR6n8Zf~}0Z(vdRl^8G;vHn+v~-zMSd8f!}daRALVdCL}F84FZ9M+hN=422XRrK;I3 zAsLb3_$N1pc16*X>}1HHxS%>F*oV#ENU7>LaI((3(D@<2V&4Il1GLu|0IdEY(d{TL z1?+E$?Nwa3Oclw5d=WCL)F*^I4ELs7G?V!p?V^-X87?R?a*(b)qpjMc2h1XrR?_@; zE-s<1+E9nwv%jgk2ND24*oE%?1GpMOGLQcJ5<}z>c0000+Ng+ literal 0 HcmV?d00001 diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index 1673441db8..5a98fc1c18 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -8,6 +8,7 @@ These features allows users to ensure high throughput of their data processing while also controlling the amount of computing resources a given workflow will use. + Specifying Resources in the Node Interface ========================================== Each ``Node`` instance interface has two parameters that specify its expected @@ -23,6 +24,7 @@ particular node is expected to use 8 threads and 2 GB of memory: If the resource parameters are never set, they default to being 1 thread and 1 GB of RAM. + Resource Scheduler ================== The ``MultiProc`` workflow plugin schedules node execution based on the @@ -43,28 +45,29 @@ for ``n_procs`` and ``memory_gb``, respectively. The plugin will then queue eligible nodes for execution based on their expected usage via the ``num_threads`` and ``estimated_memory_gb`` interface parameters. If the plugin sees that only 3 of its 4 processors and 4 GB of its 6 GB of RAM -are being used, it will attempt to execute the next available node as long as -its ``num_threads = 1`` and ``estimated_memory_gb <= 2``. If this is not the -case, it will continue to check every available node in the queue until it sees -a node that meets these conditions or it waits for a executing node to finish to -earn back the necessary resources. The priority of the queue is highest for -nodes with the most ``estimated_memory_gb`` followed by nodes with the most -expected ``num_threads``. +are being used by running nodes, it will attempt to execute the next available +node as long as its ``num_threads = 1`` and ``estimated_memory_gb <= 2``. If +this is not the case, it will continue to check every available node in the +queue until it sees a node that meets these conditions, or it waits for an +executing node to finish to earn back the necessary resources. The priority of +the queue is highest for nodes with the most ``estimated_memory_gb`` followed +by nodes with the most expected ``num_threads``. + Runtime Profiler and using the Callback Log =========================================== It is not always easy to estimate the amount of resources a particular function or command uses. To help with this, Nipype provides some feedback about the system resources used by every node during workflow execution via the built-in -runtime profiler. The runtime profiler is automatically enabled if the] +runtime profiler. The runtime profiler is automatically enabled if the ``psutil`` Python package is installed and found on the system. If the package is not found, the workflow will run normally without the runtime profiler. The runtime profiler records the number of threads and the amount of memory (GB) used as ``runtime_threads`` and ``runtime_memory_gb`` in the Node's -``result.runtime`` parameter. Since the node object is pickled and written to +``result.runtime`` attribute. Since the node object is pickled and written to disk in its working directory, these values are available for analysis after -node or workflow execution by parsing the pickle file contents in Python. +node or workflow execution by manually parsing the pickle file contents. Nipype also provides a logging mechanism for saving node runtime statistics to a JSON-style log file via the ``log_nodes_cb`` logger function. This is enabled @@ -88,7 +91,7 @@ configured. handler = logging.FileHandler(callback_log_path) logger.addHandler(handler) -Finally, the workflow can be ran. +Finally, the workflow can be run. :: workflow.run(plugin='MultiProc', plugin_args=args_dict) @@ -112,6 +115,7 @@ the user can change the node interface ``num_threads`` and ``estimated_memory_gb`` parameters to reflect this for a higher pipeline throughput. + Visualizing Pipeline Resources ============================== Nipype provides the ability to visualize the workflow execution based on the @@ -131,4 +135,9 @@ generated from the callback logger after workflow execution - as shown above. # ...creates gantt chart in '/home/user/run_stats.log.html' The `generate_gantt_chart`` function will create an html file that can be viewed -in a browser. \ No newline at end of file +in a browser. Below is an example of the gantt chart displayed in a web browser. +Note that when the cursor is hovered over any particular node bubble or resource +bubble, some additional information is shown in a pop-up. + + * - .. image:: images/gantt_chart.png + :width: 100 % \ No newline at end of file From 7513f402f7d0b7f23a0cf2e326ad95492c19f27b Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 25 Apr 2016 10:06:52 -0400 Subject: [PATCH 56/78] Updated docs with dependencies --- doc/users/resource_sched_profiler.rst | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index 5a98fc1c18..cccda6928b 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -60,8 +60,12 @@ It is not always easy to estimate the amount of resources a particular function or command uses. To help with this, Nipype provides some feedback about the system resources used by every node during workflow execution via the built-in runtime profiler. The runtime profiler is automatically enabled if the -``psutil`` Python package is installed and found on the system. If the package -is not found, the workflow will run normally without the runtime profiler. +psutil_ Python package is installed and found on the system. + +.. _psutil: https://pythonhosted.org/psutil/ + +If the package is not found, the workflow will run normally without the runtime +profiler. The runtime profiler records the number of threads and the amount of memory (GB) used as ``runtime_threads`` and ``runtime_memory_gb`` in the Node's @@ -121,6 +125,9 @@ Visualizing Pipeline Resources Nipype provides the ability to visualize the workflow execution based on the runtimes and system resources each node takes. It does this using the log file generated from the callback logger after workflow execution - as shown above. +The pandas_ Python package is required to use this feature. + +.. _pandas: http://pandas.pydata.org/ :: from nipype.pipeline.plugins.callback_log import log_nodes_cb From 3814ae939cbcbcd8c93ef240a8314c142818911d Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 25 Apr 2016 10:13:27 -0400 Subject: [PATCH 57/78] Moved gantt chart to proper images folder --- doc/{ => users}/images/gantt_chart.png | Bin 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/{ => users}/images/gantt_chart.png (100%) diff --git a/doc/images/gantt_chart.png b/doc/users/images/gantt_chart.png similarity index 100% rename from doc/images/gantt_chart.png rename to doc/users/images/gantt_chart.png From 0189dd8823f7168bcff60e58bdfb72ca0b41b14a Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 25 Apr 2016 11:11:43 -0400 Subject: [PATCH 58/78] Fixed some failing unit tests --- nipype/interfaces/io.py | 2 +- .../interfaces/tests/test_runtime_profiler.py | 14 ++++++-------- .../pipeline/plugins/tests/test_multiproc.py | 18 ++++++++++-------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 7bc0ff358c..0035971f31 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -211,7 +211,7 @@ class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): encrypt_bucket_keys = traits.Bool(desc='Flag indicating whether to use S3 '\ 'server-side AES-256 encryption') # Set this if user wishes to override the bucket with their own - bucket = traits.Str(desc='Boto3 S3 bucket for manual override of bucket') + bucket = traits.Any(desc='Boto3 S3 bucket for manual override of bucket') # Set this if user wishes to have local copy of files as well local_copy = traits.Str(desc='Copy files locally as well as to S3 bucket') diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 71ccbd9e0b..639a5c218f 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -127,9 +127,9 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb = 6 + self.num_gb = 2 # Input number of sub-threads (not including parent threads) - self.num_threads = 7 + self.num_threads = 4 # Acceptable percent error for memory profiled against input self.mem_err_percent = 5 @@ -379,9 +379,8 @@ def test_cmdline_profiling(self): # Get margin of error for RAM GB allowed_gb_err = (self.mem_err_percent/100.0)*num_gb runtime_gb_err = np.abs(runtime_gb-num_gb) - # Runtime threads should reflect shell-cmd thread, Python parent thread - # and Python sub-threads = 1 + 1 + num_threads - expected_runtime_threads = 1 + 1 + num_threads + # + expected_runtime_threads = num_threads # Error message formatting mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ @@ -421,9 +420,8 @@ def test_function_profiling(self): # Get margin of error for RAM GB allowed_gb_err = (self.mem_err_percent/100.0)*num_gb runtime_gb_err = np.abs(runtime_gb-num_gb) - # Runtime threads should reflect Python parent thread - # and Python sub-threads = 1 + num_threads - expected_runtime_threads = 1 + num_threads + # + expected_runtime_threads = num_threads # Error message formatting mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 264753c8a0..6c08862523 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -88,8 +88,8 @@ def find_metrics(nodes, last_node): from dateutil.parser import parse import datetime - start = parse(nodes[0]['start']) - total_duration = int((parse(last_node['finish']) - start).total_seconds()) + start = nodes[0]['start'] + total_duration = int((last_node['finish'] - start).total_seconds()) total_memory = [] total_threads = [] @@ -106,12 +106,12 @@ def find_metrics(nodes, last_node): x = now for j in range(start_index, len(nodes)): - node_start = parse(nodes[j]['start']) - node_finish = parse(nodes[j]['finish']) + node_start = nodes[j]['start'] + node_finish = nodes[j]['finish'] if node_start < x and node_finish > x: - total_memory[i] += nodes[j]['estimated_memory_gb'] - total_threads[i] += nodes[j]['num_threads'] + total_memory[i] += float(nodes[j]['estimated_memory_gb']) + total_threads[i] += int(nodes[j]['num_threads']) start_index = j if node_start > x: @@ -154,7 +154,8 @@ def test_do_not_use_more_memory_then_specified(): 'status_callback': log_nodes_cb}) - nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) + nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) + last_node = nodes[-1] #usage in every second memory, threads = find_metrics(nodes, last_node) @@ -210,7 +211,8 @@ def test_do_not_use_more_threads_then_specified(): pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, 'status_callback': log_nodes_cb}) - nodes, last_node = draw_gantt_chart.log_to_json(LOG_FILENAME) + nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) + last_node = nodes[-1] #usage in every second memory, threads = find_metrics(nodes, last_node) From de3b298ea8b9be87539146594effc06e68da0afd Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 26 Apr 2016 16:11:43 -0400 Subject: [PATCH 59/78] Modified thread-monitoring logic and ensured unit tests pass --- doc/users/resource_sched_profiler.rst | 7 +++++- nipype/interfaces/base.py | 24 ++++++++++++++++--- .../interfaces/tests/test_runtime_profiler.py | 11 +++++---- nipype/interfaces/tests/use_resources | 6 +++-- nipype/utils/draw_gantt_chart.py | 9 +++++-- 5 files changed, 45 insertions(+), 12 deletions(-) diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index cccda6928b..7aacad9766 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -117,7 +117,12 @@ Here it can be seen that the number of threads was underestimated while the amount of memory needed was overestimated. The next time this workflow is run the user can change the node interface ``num_threads`` and ``estimated_memory_gb`` parameters to reflect this for a higher pipeline -throughput. +throughput. Note, sometimes the "runtime_threads" value is higher than expected, +particularly for multi-threaded applications. Tools can implement +multi-threading in different ways under-the-hood; the profiler merely traverses +the process tree to return all running threads associated with that process, +some of which may include active thread-monitoring daemons or transient +processes. Visualizing Pipeline Resources diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index c93829ffb8..16b1c95293 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1216,16 +1216,34 @@ def _get_num_threads(proc): # If process is running if proc.status() == psutil.STATUS_RUNNING: num_threads = proc.num_threads() + elif proc.num_threads() > 1: + tprocs = [psutil.Process(thr.id) for thr in proc.threads()] + alive_tprocs = [tproc for tproc in tprocs if tproc.status() == psutil.STATUS_RUNNING] + num_threads = len(alive_tprocs) else: - num_threads = 0 + num_threads = 1 # Try-block for errors try: child_threads = 0 # Iterate through child processes and get number of their threads for child in proc.children(recursive=True): - if child.status() == psutil.STATUS_RUNNING and len(child.children()) == 0: - child_threads += child.num_threads() + # Leaf process + if len(child.children()) == 0: + # If process is running, get its number of threads + if child.status() == psutil.STATUS_RUNNING: + child_thr = child.num_threads() + # If its not necessarily running, but still multi-threaded + elif child.num_threads() > 1: + # Cast each thread as a process and check for only running + tprocs = [psutil.Process(thr.id) for thr in child.threads()] + alive_tprocs = [tproc for tproc in tprocs if tproc.status() == psutil.STATUS_RUNNING] + child_thr = len(alive_tprocs) + # Otherwise, no threads are running + else: + child_thr = 0 + # Increment child threads + child_threads += child_thr # Catch any NoSuchProcess errors except psutil.NoSuchProcess: pass diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 639a5c218f..46d75a0109 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -78,6 +78,7 @@ def _use_gb_ram(num_gb): # Import packages from threading import Thread + from multiprocessing import Process # Init variables num_gb = float(num_gb) @@ -85,11 +86,13 @@ def _use_gb_ram(num_gb): # Build thread list thread_list = [] for idx in range(num_threads): - thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), name=str(idx)) + thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), + name=str(idx)) thread_list.append(thread) # Run multi-threaded - print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_threads) + print('Using %.3f GB of memory over %d sub-threads...' % \ + (num_gb, num_threads)) for idx, thread in enumerate(thread_list): thread.start() @@ -127,11 +130,11 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb = 2 + self.num_gb = 4 # Input number of sub-threads (not including parent threads) self.num_threads = 4 # Acceptable percent error for memory profiled against input - self.mem_err_percent = 5 + self.mem_err_percent = 10 # ! Only used for benchmarking the profiler over a range of # ! RAM usage and number of threads diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index 60639398ac..01f1e176bb 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -35,6 +35,7 @@ if __name__ == '__main__': # Import packages import argparse from threading import Thread + from multiprocessing import Process # Init argparser parser = argparse.ArgumentParser(description=__doc__) @@ -55,10 +56,11 @@ if __name__ == '__main__': # Build thread list thread_list = [] for idx in range(num_threads): - thread_list.append(Thread(target=use_gb_ram, args=(num_gb/num_threads,))) + thread_list.append(Process(target=use_gb_ram, args=(num_gb/num_threads,))) # Run multi-threaded - print 'Using %.3f GB of memory over %d sub-threads...' % (num_gb, num_threads) + print('Using %.3f GB of memory over %d sub-threads...' % \ + (num_gb, num_threads)) for thread in thread_list: thread.start() diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 7a8fd259d9..43ec07c317 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -9,9 +9,14 @@ from dateutil import parser import datetime import random -import pandas as pd from collections import OrderedDict - +# Pandas +try: + import pandas as pd +except ImportError: + print('Pandas not found; in order for full functionality of this module '\ + 'install the pandas package') + pass def create_event_dict(start_time, nodes_list): ''' From 6f4c2e770d3f92b908f5f238df0b2fab268b0b18 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 26 Apr 2016 18:39:48 -0400 Subject: [PATCH 60/78] Lowered memory usage for unit tests --- nipype/interfaces/tests/test_runtime_profiler.py | 12 ++++++------ nipype/interfaces/tests/use_resources | 2 +- nipype/utils/draw_gantt_chart.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 46d75a0109..2bc05282a3 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -69,7 +69,7 @@ def _use_gb_ram(num_gb): # Spin CPU ctr = 0 - while ctr < 50e6: + while ctr < 30e6: ctr += 1 # Clear memory @@ -77,8 +77,8 @@ def _use_gb_ram(num_gb): del gb_str # Import packages - from threading import Thread from multiprocessing import Process + from threading import Thread # Init variables num_gb = float(num_gb) @@ -86,7 +86,7 @@ def _use_gb_ram(num_gb): # Build thread list thread_list = [] for idx in range(num_threads): - thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), + thread = Process(target=_use_gb_ram, args=(num_gb/num_threads,), name=str(idx)) thread_list.append(thread) @@ -130,11 +130,11 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb = 4 + self.num_gb = 1.0 # Input number of sub-threads (not including parent threads) - self.num_threads = 4 + self.num_threads = 2 # Acceptable percent error for memory profiled against input - self.mem_err_percent = 10 + self.mem_err_percent = 25 # ! Only used for benchmarking the profiler over a range of # ! RAM usage and number of threads diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index 01f1e176bb..e5e1d8720a 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -21,7 +21,7 @@ def use_gb_ram(num_gb): # Spin CPU ctr = 0 - while ctr < 10e6: + while ctr < 30e6: ctr+= 1 # Clear memory diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 43ec07c317..b99266c8e4 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -10,7 +10,7 @@ import datetime import random from collections import OrderedDict -# Pandas +# Pandas try: import pandas as pd except ImportError: From 0cca692ca5135730744338c99cd204662c66f6af Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 27 Apr 2016 12:03:05 -0400 Subject: [PATCH 61/78] Changed runtime unit test tolerance to GB instead of % --- .../interfaces/tests/test_runtime_profiler.py | 18 +++++++++--------- nipype/interfaces/tests/use_resources | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 2bc05282a3..2b6975ef47 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -86,7 +86,7 @@ def _use_gb_ram(num_gb): # Build thread list thread_list = [] for idx in range(num_threads): - thread = Process(target=_use_gb_ram, args=(num_gb/num_threads,), + thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), name=str(idx)) thread_list.append(thread) @@ -130,11 +130,11 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb = 1.0 + self.num_gb = 0.5 # Input number of sub-threads (not including parent threads) self.num_threads = 2 # Acceptable percent error for memory profiled against input - self.mem_err_percent = 25 + self.mem_err_gb = 0.25 # ! Only used for benchmarking the profiler over a range of # ! RAM usage and number of threads @@ -380,14 +380,14 @@ def test_cmdline_profiling(self): runtime_threads = int(node_stats['runtime_threads']) # Get margin of error for RAM GB - allowed_gb_err = (self.mem_err_percent/100.0)*num_gb + allowed_gb_err = self.mem_err_gb runtime_gb_err = np.abs(runtime_gb-num_gb) # expected_runtime_threads = num_threads # Error message formatting - mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ - 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) + mem_err = 'Input memory: %f is not within %.3f GB of runtime '\ + 'memory: %f' % (num_gb, self.mem_err_gb, runtime_gb) threads_err = 'Input threads: %d is not equal to runtime threads: %d' \ % (expected_runtime_threads, runtime_threads) @@ -421,14 +421,14 @@ def test_function_profiling(self): runtime_threads = int(node_stats['runtime_threads']) # Get margin of error for RAM GB - allowed_gb_err = (self.mem_err_percent/100.0)*num_gb + allowed_gb_err = self.mem_err_gb runtime_gb_err = np.abs(runtime_gb-num_gb) # expected_runtime_threads = num_threads # Error message formatting - mem_err = 'Input memory: %f is not within %.1f%% of runtime '\ - 'memory: %f' % (num_gb, self.mem_err_percent, runtime_gb) + mem_err = 'Input memory: %f is not within %.3f GB of runtime '\ + 'memory: %f' % (num_gb, self.mem_err_gb, runtime_gb) threads_err = 'Input threads: %d is not equal to runtime threads: %d' \ % (expected_runtime_threads, runtime_threads) diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources index e5e1d8720a..06e2d3e906 100755 --- a/nipype/interfaces/tests/use_resources +++ b/nipype/interfaces/tests/use_resources @@ -56,7 +56,7 @@ if __name__ == '__main__': # Build thread list thread_list = [] for idx in range(num_threads): - thread_list.append(Process(target=use_gb_ram, args=(num_gb/num_threads,))) + thread_list.append(Thread(target=use_gb_ram, args=(num_gb/num_threads,))) # Run multi-threaded print('Using %.3f GB of memory over %d sub-threads...' % \ From 8f1b104275557c7d07c5804caa679236a2515195 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Wed, 27 Apr 2016 15:02:36 -0400 Subject: [PATCH 62/78] Added AttributeError to exception for more specific error handling --- nipype/pipeline/engine/nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 975e6f99f9..531b745747 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -744,7 +744,7 @@ def write_report(self, report_type=None, cwd=None): try: rst_dict['runtime_memory_gb'] = self.result.runtime.runtime_memory_gb rst_dict['runtime_threads'] = self.result.runtime.runtime_threads - except: + except AttributeError: logger.info('Runtime memory and threads stats unavailable') if hasattr(self.result.runtime, 'cmdline'): rst_dict['command'] = self.result.runtime.cmdline From ffe30dae451b105a42f1e70a83112b5d164bd1f4 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 28 Apr 2016 10:28:46 -0400 Subject: [PATCH 63/78] Removed unexpected indentation from resource scheduler and profiler user docs --- doc/users/resource_sched_profiler.rst | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index 7aacad9766..c13d729290 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -16,6 +16,7 @@ thread and memory usage: ``num_threads`` and ``estimated_memory_gb``. If a particular node is expected to use 8 threads and 2 GB of memory: :: + import nipype.pipeline.engine as pe node = pe.Node() node.interface.num_threads = 8 @@ -34,6 +35,7 @@ the workflow. The plugin utilizes the plugin arguments ``n_procs`` and workflow to using 4 cores and 6 GB of RAM: :: + args_dict = {'n_procs' : 4, 'memory_gb' : 6} workflow.run(plugin='MultiProc', plugin_args=args_dict) @@ -79,14 +81,15 @@ by setting the ``status_callback`` parameter to point to this function in the ``plugin_args`` when using the ``MultiProc`` plugin. :: + from nipype.pipeline.plugins.callback_log import log_nodes_cb - args_dict = {'n_procs' : 4, 'memory_gb' : 6, - 'status_callback' : log_nodes_cb} + args_dict = {'n_procs' : 4, 'memory_gb' : 6, 'status_callback' : log_nodes_cb} To set the filepath for the callback log the ``'callback'`` logger must be configured. :: + # Set path to log file import logging callback_log_path = '/home/user/run_stats.log' @@ -98,6 +101,7 @@ configured. Finally, the workflow can be run. :: + workflow.run(plugin='MultiProc', plugin_args=args_dict) After the workflow finishes executing, the log file at @@ -105,6 +109,7 @@ After the workflow finishes executing, the log file at example of what the contents would look like: :: + {"name":"resample_node","id":"resample_node", "start":"2016-03-11 21:43:41.682258", "estimated_memory_gb":2,"num_threads":1} @@ -135,16 +140,16 @@ The pandas_ Python package is required to use this feature. .. _pandas: http://pandas.pydata.org/ :: + from nipype.pipeline.plugins.callback_log import log_nodes_cb - args_dict = {'n_procs' : 4, 'memory_gb' : 6, - 'status_callback' : log_nodes_cb} + args_dict = {'n_procs' : 4, 'memory_gb' : 6, 'status_callback' : log_nodes_cb} workflow.run(plugin='MultiProc', plugin_args=args_dict) # ...workflow finishes and writes callback log to '/home/user/run_stats.log' from nipype.utils.draw_gantt_chart import generate_gantt_chart generate_gantt_chart('/home/user/run_stats.log', cores=4) - # ...creates gantt chart in '/home/user/run_stats.log.html' + # ...creates gantt chart in '/home/user/run_stats.log.html' The `generate_gantt_chart`` function will create an html file that can be viewed in a browser. Below is an example of the gantt chart displayed in a web browser. From cf08147051c54588a751ee9968e0263522d5a2b0 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 28 Apr 2016 10:51:58 -0400 Subject: [PATCH 64/78] Updated docs to reflect png --- doc/users/resource_sched_profiler.rst | 14 +++++++------- nipype/interfaces/tests/test_runtime_profiler.py | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index c13d729290..0382fa6624 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -32,11 +32,11 @@ The ``MultiProc`` workflow plugin schedules node execution based on the resources used by the current running nodes and the total resources available to the workflow. The plugin utilizes the plugin arguments ``n_procs`` and ``memory_gb`` to set the maximum resources a workflow can utilize. To limit a -workflow to using 4 cores and 6 GB of RAM: +workflow to using 8 cores and 10 GB of RAM: :: - args_dict = {'n_procs' : 4, 'memory_gb' : 6} + args_dict = {'n_procs' : 8, 'memory_gb' : 10} workflow.run(plugin='MultiProc', plugin_args=args_dict) If these values are not specifically set then the plugin will assume it can @@ -46,9 +46,9 @@ for ``n_procs`` and ``memory_gb``, respectively. The plugin will then queue eligible nodes for execution based on their expected usage via the ``num_threads`` and ``estimated_memory_gb`` interface parameters. -If the plugin sees that only 3 of its 4 processors and 4 GB of its 6 GB of RAM +If the plugin sees that only 3 of its 8 processors and 4 GB of its 10 GB of RAM are being used by running nodes, it will attempt to execute the next available -node as long as its ``num_threads = 1`` and ``estimated_memory_gb <= 2``. If +node as long as its ``num_threads <= 5`` and ``estimated_memory_gb <= 6``. If this is not the case, it will continue to check every available node in the queue until it sees a node that meets these conditions, or it waits for an executing node to finish to earn back the necessary resources. The priority of @@ -83,7 +83,7 @@ by setting the ``status_callback`` parameter to point to this function in the :: from nipype.pipeline.plugins.callback_log import log_nodes_cb - args_dict = {'n_procs' : 4, 'memory_gb' : 6, 'status_callback' : log_nodes_cb} + args_dict = {'n_procs' : 8, 'memory_gb' : 10, 'status_callback' : log_nodes_cb} To set the filepath for the callback log the ``'callback'`` logger must be configured. @@ -142,13 +142,13 @@ The pandas_ Python package is required to use this feature. :: from nipype.pipeline.plugins.callback_log import log_nodes_cb - args_dict = {'n_procs' : 4, 'memory_gb' : 6, 'status_callback' : log_nodes_cb} + args_dict = {'n_procs' : 8, 'memory_gb' : 10, 'status_callback' : log_nodes_cb} workflow.run(plugin='MultiProc', plugin_args=args_dict) # ...workflow finishes and writes callback log to '/home/user/run_stats.log' from nipype.utils.draw_gantt_chart import generate_gantt_chart - generate_gantt_chart('/home/user/run_stats.log', cores=4) + generate_gantt_chart('/home/user/run_stats.log', cores=8) # ...creates gantt chart in '/home/user/run_stats.log.html' The `generate_gantt_chart`` function will create an html file that can be viewed diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 2b6975ef47..b9adfe5c19 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -130,7 +130,7 @@ def setUp(self): # Init parameters # Input RAM GB to occupy - self.num_gb = 0.5 + self.num_gb = 1.0 # Input number of sub-threads (not including parent threads) self.num_threads = 2 # Acceptable percent error for memory profiled against input From 35bdb2d4f446463eae01d113bce3b1d9729819ba Mon Sep 17 00:00:00 2001 From: dclark87 Date: Fri, 29 Apr 2016 17:25:51 -0400 Subject: [PATCH 65/78] Fixed some errors --- nipype/pipeline/plugins/callback_log.py | 61 ++++++++++++++----------- nipype/pipeline/plugins/multiproc.py | 17 ++++--- nipype/utils/draw_gantt_chart.py | 22 ++------- 3 files changed, 48 insertions(+), 52 deletions(-) diff --git a/nipype/pipeline/plugins/callback_log.py b/nipype/pipeline/plugins/callback_log.py index d4445a28c9..14287bda07 100644 --- a/nipype/pipeline/plugins/callback_log.py +++ b/nipype/pipeline/plugins/callback_log.py @@ -3,18 +3,31 @@ """Callback logger for recording workflow and node run stats """ -# Import packages -import datetime -import logging # Log node stats function def log_nodes_cb(node, status): """Function to record node run statistics to a log file as json dictionaries + + Parameters + ---------- + node : nipype.pipeline.engine.Node + the node being logged + status : string + acceptable values are 'start', 'end'; otherwise it is + considered and error + + Returns + ------- + None + this function does not return any values, it logs the node + status info to the callback logger """ - # Init variables - logger = logging.getLogger('callback') + # Import packages + import datetime + import logging + import json # Check runtime profile stats if node.result is not None: @@ -22,35 +35,31 @@ def log_nodes_cb(node, status): runtime = node.result.runtime runtime_memory_gb = runtime.runtime_memory_gb runtime_threads = runtime.runtime_threads - except: - runtime_memory_gb = runtime_threads = 'Unkown' + except AttributeError: + runtime_memory_gb = runtime_threads = 'Unknown' else: runtime_memory_gb = runtime_threads = 'N/A' + # Init variables + logger = logging.getLogger('callback') + status_dict = {'name' : node.name, + 'id' : node._id, + 'estimated_memory_gb' : node._interface.estimated_memory_gb, + 'num_threads' : node._interface.num_threads} + # Check status and write to log # Start if status == 'start': - message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' +\ - node._id + '"' + ',"start":' + '"' +str(datetime.datetime.now()) +\ - '"' + ',"estimated_memory_gb":' + str(node._interface.estimated_memory_gb) + \ - ',"num_threads":' + str(node._interface.num_threads) + '}' - - logger.debug(message) + status_dict['start'] = str(datetime.datetime.now()) # End elif status == 'end': - message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ - node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) + \ - '"' + ',"estimated_memory_gb":' + '"'+ str(node._interface.estimated_memory_gb) + \ - '"'+ ',"num_threads":' + '"'+ str(node._interface.num_threads) + '"'+ \ - ',"runtime_threads":' + '"'+ str(runtime_threads) + '"'+ \ - ',"runtime_memory_gb":' + '"'+ str(runtime_memory_gb) + '"' + '}' - - logger.debug(message) + status_dict['finish'] = str(datetime.datetime.now()) + status_dict['runtime_threads'] = runtime_threads + status_dict['runtime_memory_gb'] = runtime_memory_gb # Other else: - message = '{"name":' + '"' + node.name + '"' + ',"id":' + '"' + \ - node._id + '"' + ',"finish":' + '"' + str(datetime.datetime.now()) +\ - '"' + ',"estimated_memory_gb":' + str(node._interface.estimated_memory_gb) + \ - ',"num_threads":' + str(node._interface.num_threads) + ',"error":"True"}' + status_dict['finish'] = str(datetime.datetime.now()) + status_dict['error'] = True - logger.debug(message) + # Dump string to log + logger.debug(json.dumps(status_dict)) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index eef7dacb3d..eea491e898 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -124,8 +124,8 @@ class MultiProcPlugin(DistributedPluginBase): Currently supported options are: - non_daemon : boolean flag to execute as non-daemon processes - - num_threads: maximum number of threads to be executed in parallel - - estimated_memory_gb: maximum memory (in GB) that can be used at once. + - n_procs: maximum number of threads to be executed in parallel + - memory_gb: maximum memory (in GB) that can be used at once. """ @@ -180,11 +180,8 @@ def _clear_task(self, taskid): def _submit_job(self, node, updatehash=False): self._taskid += 1 - try: - if node.inputs.terminal_output == 'stream': - node.inputs.terminal_output = 'allatonce' - except: - pass + if node.inputs.terminal_output == 'stream': + node.inputs.terminal_output = 'allatonce' self._taskresult[self._taskid] = \ self.pool.apply_async(run_node, @@ -282,8 +279,10 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): % self.procs[jobid]) try: self.procs[jobid].run() - except Exception: - self._clean_queue(jobid, graph) + except: + etype, eval, etr = sys.exc_info() + formatted_exc = format_exception(etype, eval, etr) + logger.debug('Traceback:\n%s' % '\n'.join(formatted_exc)) self._task_finished_cb(jobid) self._remove_node_dirs() diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index b99266c8e4..700fdad57b 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -43,22 +43,10 @@ def create_event_dict(start_time, nodes_list): events = {} for node in nodes_list: # Format node fields - try: - estimated_threads = float(node['num_threads']) - except: - estimated_threads = 1 - try: - estimated_memory_gb = float(node['estimated_memory_gb']) - except: - estimated_memory_gb = 1.0 - try: - runtime_threads = float(node['runtime_threads']) - except: - runtime_threads = 0 - try: - runtime_memory_gb = float(node['runtime_memory_gb']) - except: - runtime_memory_gb = 0.0 + estimated_threads = int(node.get('num_threads'), 1) + estimated_memory_gb = float(node.get('estimated_memory_gb', 1.0)) + runtime_threads = int(node.get('runtime_threads'), 0) + runtime_memory_gb = float(node.get('runtime_memory_gb', 0.0)) # Init and format event-based nodes node['estimated_threads'] = estimated_threads @@ -119,7 +107,7 @@ def log_to_dict(logfile): node = None try: node = json.loads(l) - except Exception: + except ValueError: pass if not node: From d83d84941845895546c375315bfa8c218577b878 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 2 May 2016 11:21:57 -0400 Subject: [PATCH 66/78] Reversed logic for afni OMP_NUM_THREADS --- nipype/interfaces/afni/base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index b82ef45152..431b024780 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -159,8 +159,7 @@ def __init__(self, **inputs): # Update num threads estimate from OMP_NUM_THREADS env var # Default to 1 if not set - import os - self.num_threads = int(os.getenv('OMP_NUM_THREADS', 1)) + os.environ['OMP_NUM_THREADS'] = str(self.num_threads) def _output_update(self): """ i think? updates class private attribute based on instance input From b21bca6b4b297b4f1dbf567b500794d24b07e8cb Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 2 May 2016 11:52:58 -0400 Subject: [PATCH 67/78] Added traceback crash reporting --- nipype/pipeline/plugins/multiproc.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index eea491e898..881c2fb42d 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -238,6 +238,9 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): try: num_subnodes = self.procs[jobid].num_subnodes() except Exception: + etype, eval, etr = sys.exc_info() + traceback = format_exception(etype, eval, etr) + report_crash(self.procs[jobid], traceback=traceback) self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue From 79d198885a6619173f22c2918c07f25cda009fcb Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 2 May 2016 12:09:55 -0400 Subject: [PATCH 68/78] More specific exception handling addressed --- nipype/pipeline/plugins/multiproc.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 881c2fb42d..8612c8d0aa 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -272,6 +272,9 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): self._remove_node_dirs() continue except Exception: + etype, eval, etr = sys.exc_info() + traceback = format_exception(etype, eval, etr) + report_crash(self.procs[jobid], traceback=traceback) self._clean_queue(jobid, graph) self.proc_pending[jobid] = False continue @@ -282,10 +285,10 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): % self.procs[jobid]) try: self.procs[jobid].run() - except: + except Exception: etype, eval, etr = sys.exc_info() - formatted_exc = format_exception(etype, eval, etr) - logger.debug('Traceback:\n%s' % '\n'.join(formatted_exc)) + traceback = format_exception(etype, eval, etr) + report_crash(self.procs[jobid], traceback=traceback) self._task_finished_cb(jobid) self._remove_node_dirs() From fd788f8b103220ecd72b97d4616d1a4a82357e87 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 2 May 2016 14:38:25 -0400 Subject: [PATCH 69/78] Added safeguard for not taking 100% of system memory when plugin_arg 'memory_gb' not specified --- nipype/pipeline/plugins/multiproc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 8612c8d0aa..169822971e 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -137,7 +137,7 @@ def __init__(self, plugin_args=None): non_daemon = True self.plugin_args = plugin_args self.processors = cpu_count() - self.memory_gb = get_system_total_memory_gb() + self.memory_gb = get_system_total_memory_gb()*0.9 # 90% of system memory # Check plugin args if self.plugin_args: From d6cafa2afdbdd84c21b6075590c967b9d60671b3 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 3 May 2016 11:29:15 -0400 Subject: [PATCH 70/78] Fix attribute error related to terminal_output attribute --- nipype/pipeline/plugins/multiproc.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 169822971e..d2a7f7f9b9 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -180,8 +180,9 @@ def _clear_task(self, taskid): def _submit_job(self, node, updatehash=False): self._taskid += 1 - if node.inputs.terminal_output == 'stream': - node.inputs.terminal_output = 'allatonce' + if hasattr(node.inputs, 'terminal_output'): + if node.inputs.terminal_output == 'stream': + node.inputs.terminal_output = 'allatonce' self._taskresult[self._taskid] = \ self.pool.apply_async(run_node, From 9b46c492f50961d62ebb98e8cb53fcfeaf8021b4 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Thu, 5 May 2016 11:07:34 -0400 Subject: [PATCH 71/78] Trying to disable some unittests for TravisCI debugging --- nipype/interfaces/tests/test_runtime_profiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index b9adfe5c19..97c18db7a7 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -18,7 +18,7 @@ skip_profile_msg = 'Missing python packages for runtime profiling, skipping...\n'\ 'Error: %s' % exc run_profiler = False - +run_profiler = False # tmp fix # UseResources inputspec class UseResourcesInputSpec(CommandLineInputSpec): ''' From 46f32759c595da8a29aa4d8511922005b7622ce1 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 9 May 2016 09:53:47 -0400 Subject: [PATCH 72/78] Commented out more unittests for TravisCI debugging --- .../pipeline/plugins/tests/test_multiproc.py | 266 +++++++++--------- 1 file changed, 133 insertions(+), 133 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 6c08862523..33fcced13d 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -34,27 +34,27 @@ def _list_outputs(self): return outputs -def test_run_multiproc(): - cur_dir = os.getcwd() - temp_dir = mkdtemp(prefix='test_engine_') - os.chdir(temp_dir) - - pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=TestInterface(), name='mod1') - mod2 = pe.MapNode(interface=TestInterface(), - iterfield=['input1'], - name='mod2') - pipe.connect([(mod1, mod2, [('output1', 'input1')])]) - pipe.base_dir = os.getcwd() - mod1.inputs.input1 = 1 - pipe.config['execution']['poll_sleep_duration'] = 2 - execgraph = pipe.run(plugin="MultiProc") - names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] - node = execgraph.nodes()[names.index('pipe.mod1')] - result = node.get_output('output1') - yield assert_equal, result, [1, 1] - os.chdir(cur_dir) - rmtree(temp_dir) +#def test_run_multiproc(): +# cur_dir = os.getcwd() +# temp_dir = mkdtemp(prefix='test_engine_') +# os.chdir(temp_dir) +# +# pipe = pe.Workflow(name='pipe') +# mod1 = pe.Node(interface=TestInterface(), name='mod1') +# mod2 = pe.MapNode(interface=TestInterface(), +# iterfield=['input1'], +# name='mod2') +# pipe.connect([(mod1, mod2, [('output1', 'input1')])]) +# pipe.base_dir = os.getcwd() +# mod1.inputs.input1 = 1 +# pipe.config['execution']['poll_sleep_duration'] = 2 +# execgraph = pipe.run(plugin="MultiProc") +# names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] +# node = execgraph.nodes()[names.index('pipe.mod1')] +# result = node.get_output('output1') +# yield assert_equal, result, [1, 1] +# os.chdir(cur_dir) +# rmtree(temp_dir) class InputSpecSingleNode(nib.TraitedSpec): @@ -122,115 +122,115 @@ def find_metrics(nodes, last_node): return total_memory, total_threads -def test_do_not_use_more_memory_then_specified(): - LOG_FILENAME = 'callback.log' - my_logger = logging.getLogger('callback') - my_logger.setLevel(logging.DEBUG) - - # Add the log message handler to the logger - handler = logging.FileHandler(LOG_FILENAME) - my_logger.addHandler(handler) - - max_memory = 10 - pipe = pe.Workflow(name='pipe') - n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') - n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') - n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') - n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') - - n1.interface.estimated_memory_gb = 1 - n2.interface.estimated_memory_gb = 1 - n3.interface.estimated_memory_gb = 10 - n4.interface.estimated_memory_gb = 1 - - pipe.connect(n1, 'output1', n2, 'input1') - pipe.connect(n1, 'output1', n3, 'input1') - pipe.connect(n2, 'output1', n4, 'input1') - pipe.connect(n3, 'output1', n4, 'input2') - n1.inputs.input1 = 10 - - pipe.run(plugin='MultiProc', - plugin_args={'memory': max_memory, - 'status_callback': log_nodes_cb}) - - - nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) - last_node = nodes[-1] - #usage in every second - memory, threads = find_metrics(nodes, last_node) - - result = True - for m in memory: - if m > max_memory: - result = False - break - - yield assert_equal, result, True - - max_threads = cpu_count() - - result = True - for t in threads: - if t > max_threads: - result = False - break - - yield assert_equal, result, True,\ - "using more threads than system has (threads is not specified by user)" - - os.remove(LOG_FILENAME) - - -def test_do_not_use_more_threads_then_specified(): - LOG_FILENAME = 'callback.log' - my_logger = logging.getLogger('callback') - my_logger.setLevel(logging.DEBUG) - - # Add the log message handler to the logger - handler = logging.FileHandler(LOG_FILENAME) - my_logger.addHandler(handler) - - max_threads = 10 - pipe = pe.Workflow(name='pipe') - n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') - n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') - n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') - n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') - - n1.interface.num_threads = 1 - n2.interface.num_threads = 1 - n3.interface.num_threads = 10 - n4.interface.num_threads = 1 - - pipe.connect(n1, 'output1', n2, 'input1') - pipe.connect(n1, 'output1', n3, 'input1') - pipe.connect(n2, 'output1', n4, 'input1') - pipe.connect(n3, 'output1', n4, 'input2') - n1.inputs.input1 = 10 - pipe.config['execution']['poll_sleep_duration'] = 1 - pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, - 'status_callback': log_nodes_cb}) - - nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) - last_node = nodes[-1] - #usage in every second - memory, threads = find_metrics(nodes, last_node) - - result = True - for t in threads: - if t > max_threads: - result = False - break - - yield assert_equal, result, True, "using more threads than specified" - - max_memory = get_system_total_memory_gb() - result = True - for m in memory: - if m > max_memory: - result = False - break - yield assert_equal, result, True,\ - "using more memory than system has (memory is not specified by user)" - - os.remove(LOG_FILENAME) +#def test_do_not_use_more_memory_then_specified(): +# LOG_FILENAME = 'callback.log' +# my_logger = logging.getLogger('callback') +# my_logger.setLevel(logging.DEBUG) +# +# # Add the log message handler to the logger +# handler = logging.FileHandler(LOG_FILENAME) +# my_logger.addHandler(handler) +# +# max_memory = 10 +# pipe = pe.Workflow(name='pipe') +# n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') +# n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') +# n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') +# n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') +# +# n1.interface.estimated_memory_gb = 1 +# n2.interface.estimated_memory_gb = 1 +# n3.interface.estimated_memory_gb = 10 +# n4.interface.estimated_memory_gb = 1 +# +# pipe.connect(n1, 'output1', n2, 'input1') +# pipe.connect(n1, 'output1', n3, 'input1') +# pipe.connect(n2, 'output1', n4, 'input1') +# pipe.connect(n3, 'output1', n4, 'input2') +# n1.inputs.input1 = 10 +# +# pipe.run(plugin='MultiProc', +# plugin_args={'memory': max_memory, +# 'status_callback': log_nodes_cb}) +# +# +# nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) +# last_node = nodes[-1] +# #usage in every second +# memory, threads = find_metrics(nodes, last_node) +# +# result = True +# for m in memory: +# if m > max_memory: +# result = False +# break +# +# yield assert_equal, result, True +# +# max_threads = cpu_count() +# +# result = True +# for t in threads: +# if t > max_threads: +# result = False +# break +# +# yield assert_equal, result, True,\ +# "using more threads than system has (threads is not specified by user)" +# +# os.remove(LOG_FILENAME) +# +# +#def test_do_not_use_more_threads_then_specified(): +# LOG_FILENAME = 'callback.log' +# my_logger = logging.getLogger('callback') +# my_logger.setLevel(logging.DEBUG) +# +# # Add the log message handler to the logger +# handler = logging.FileHandler(LOG_FILENAME) +# my_logger.addHandler(handler) +# +# max_threads = 10 +# pipe = pe.Workflow(name='pipe') +# n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') +# n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') +# n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') +# n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') +# +# n1.interface.num_threads = 1 +# n2.interface.num_threads = 1 +# n3.interface.num_threads = 10 +# n4.interface.num_threads = 1 +# +# pipe.connect(n1, 'output1', n2, 'input1') +# pipe.connect(n1, 'output1', n3, 'input1') +# pipe.connect(n2, 'output1', n4, 'input1') +# pipe.connect(n3, 'output1', n4, 'input2') +# n1.inputs.input1 = 10 +# pipe.config['execution']['poll_sleep_duration'] = 1 +# pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, +# 'status_callback': log_nodes_cb}) +# +# nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) +# last_node = nodes[-1] +# #usage in every second +# memory, threads = find_metrics(nodes, last_node) +# +# result = True +# for t in threads: +# if t > max_threads: +# result = False +# break +# +# yield assert_equal, result, True, "using more threads than specified" +# +# max_memory = get_system_total_memory_gb() +# result = True +# for m in memory: +# if m > max_memory: +# result = False +# break +# yield assert_equal, result, True,\ +# "using more memory than system has (memory is not specified by user)" +# +# os.remove(LOG_FILENAME) From 41c69289588cd091bc0b983cfc9c29f4b2f095b9 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 9 May 2016 10:17:53 -0400 Subject: [PATCH 73/78] Enabled runtime profiler testing option --- nipype/interfaces/tests/test_runtime_profiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py index 97c18db7a7..b9adfe5c19 100644 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ b/nipype/interfaces/tests/test_runtime_profiler.py @@ -18,7 +18,7 @@ skip_profile_msg = 'Missing python packages for runtime profiling, skipping...\n'\ 'Error: %s' % exc run_profiler = False -run_profiler = False # tmp fix + # UseResources inputspec class UseResourcesInputSpec(CommandLineInputSpec): ''' From f0a3889a0ba8b7e5b3fa8c3ab339eb4c582c60ce Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 9 May 2016 10:35:03 -0400 Subject: [PATCH 74/78] Added one test back in --- .../pipeline/plugins/tests/test_multiproc.py | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 33fcced13d..d779ec9de7 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -34,27 +34,27 @@ def _list_outputs(self): return outputs -#def test_run_multiproc(): -# cur_dir = os.getcwd() -# temp_dir = mkdtemp(prefix='test_engine_') -# os.chdir(temp_dir) -# -# pipe = pe.Workflow(name='pipe') -# mod1 = pe.Node(interface=TestInterface(), name='mod1') -# mod2 = pe.MapNode(interface=TestInterface(), -# iterfield=['input1'], -# name='mod2') -# pipe.connect([(mod1, mod2, [('output1', 'input1')])]) -# pipe.base_dir = os.getcwd() -# mod1.inputs.input1 = 1 -# pipe.config['execution']['poll_sleep_duration'] = 2 -# execgraph = pipe.run(plugin="MultiProc") -# names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] -# node = execgraph.nodes()[names.index('pipe.mod1')] -# result = node.get_output('output1') -# yield assert_equal, result, [1, 1] -# os.chdir(cur_dir) -# rmtree(temp_dir) +def test_run_multiproc(): + cur_dir = os.getcwd() + temp_dir = mkdtemp(prefix='test_engine_') + os.chdir(temp_dir) + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=TestInterface(), name='mod1') + mod2 = pe.MapNode(interface=TestInterface(), + iterfield=['input1'], + name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + pipe.config['execution']['poll_sleep_duration'] = 2 + execgraph = pipe.run(plugin="MultiProc") + names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] + node = execgraph.nodes()[names.index('pipe.mod1')] + result = node.get_output('output1') + yield assert_equal, result, [1, 1] + os.chdir(cur_dir) + rmtree(temp_dir) class InputSpecSingleNode(nib.TraitedSpec): From 030593065f2608194c65a83dc442b0652d55308d Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 9 May 2016 11:32:23 -0400 Subject: [PATCH 75/78] Added threads unit test back in --- .../pipeline/plugins/tests/test_multiproc.py | 106 +++++++++--------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index d779ec9de7..63679ab1f0 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -181,56 +181,56 @@ def find_metrics(nodes, last_node): # os.remove(LOG_FILENAME) # # -#def test_do_not_use_more_threads_then_specified(): -# LOG_FILENAME = 'callback.log' -# my_logger = logging.getLogger('callback') -# my_logger.setLevel(logging.DEBUG) -# -# # Add the log message handler to the logger -# handler = logging.FileHandler(LOG_FILENAME) -# my_logger.addHandler(handler) -# -# max_threads = 10 -# pipe = pe.Workflow(name='pipe') -# n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') -# n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') -# n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') -# n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') -# -# n1.interface.num_threads = 1 -# n2.interface.num_threads = 1 -# n3.interface.num_threads = 10 -# n4.interface.num_threads = 1 -# -# pipe.connect(n1, 'output1', n2, 'input1') -# pipe.connect(n1, 'output1', n3, 'input1') -# pipe.connect(n2, 'output1', n4, 'input1') -# pipe.connect(n3, 'output1', n4, 'input2') -# n1.inputs.input1 = 10 -# pipe.config['execution']['poll_sleep_duration'] = 1 -# pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, -# 'status_callback': log_nodes_cb}) -# -# nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) -# last_node = nodes[-1] -# #usage in every second -# memory, threads = find_metrics(nodes, last_node) -# -# result = True -# for t in threads: -# if t > max_threads: -# result = False -# break -# -# yield assert_equal, result, True, "using more threads than specified" -# -# max_memory = get_system_total_memory_gb() -# result = True -# for m in memory: -# if m > max_memory: -# result = False -# break -# yield assert_equal, result, True,\ -# "using more memory than system has (memory is not specified by user)" -# -# os.remove(LOG_FILENAME) +def test_do_not_use_more_threads_then_specified(): + LOG_FILENAME = 'callback.log' + my_logger = logging.getLogger('callback') + my_logger.setLevel(logging.DEBUG) + + # Add the log message handler to the logger + handler = logging.FileHandler(LOG_FILENAME) + my_logger.addHandler(handler) + + max_threads = 4 + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') + n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') + n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') + n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') + + n1.interface.num_threads = 1 + n2.interface.num_threads = 1 + n3.interface.num_threads = 4 + n4.interface.num_threads = 1 + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 4 + pipe.config['execution']['poll_sleep_duration'] = 1 + pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, + 'status_callback': log_nodes_cb}) + + nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) + last_node = nodes[-1] + #usage in every second + memory, threads = find_metrics(nodes, last_node) + + result = True + for t in threads: + if t > max_threads: + result = False + break + + yield assert_equal, result, True, "using more threads than specified" + + max_memory = get_system_total_memory_gb() + result = True + for m in memory: + if m > max_memory: + result = False + break + yield assert_equal, result, True,\ + "using more memory than system has (memory is not specified by user)" + + os.remove(LOG_FILENAME) From b566b22b6f5807e399ee70a843ae02d06bfe1f06 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 9 May 2016 12:37:18 -0400 Subject: [PATCH 76/78] Changed max memory to 1 GB --- .../pipeline/plugins/tests/test_multiproc.py | 118 +++++++++--------- 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 63679ab1f0..82574d07fe 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -122,65 +122,65 @@ def find_metrics(nodes, last_node): return total_memory, total_threads -#def test_do_not_use_more_memory_then_specified(): -# LOG_FILENAME = 'callback.log' -# my_logger = logging.getLogger('callback') -# my_logger.setLevel(logging.DEBUG) -# -# # Add the log message handler to the logger -# handler = logging.FileHandler(LOG_FILENAME) -# my_logger.addHandler(handler) -# -# max_memory = 10 -# pipe = pe.Workflow(name='pipe') -# n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') -# n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') -# n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') -# n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') -# -# n1.interface.estimated_memory_gb = 1 -# n2.interface.estimated_memory_gb = 1 -# n3.interface.estimated_memory_gb = 10 -# n4.interface.estimated_memory_gb = 1 -# -# pipe.connect(n1, 'output1', n2, 'input1') -# pipe.connect(n1, 'output1', n3, 'input1') -# pipe.connect(n2, 'output1', n4, 'input1') -# pipe.connect(n3, 'output1', n4, 'input2') -# n1.inputs.input1 = 10 -# -# pipe.run(plugin='MultiProc', -# plugin_args={'memory': max_memory, -# 'status_callback': log_nodes_cb}) -# -# -# nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) -# last_node = nodes[-1] -# #usage in every second -# memory, threads = find_metrics(nodes, last_node) -# -# result = True -# for m in memory: -# if m > max_memory: -# result = False -# break -# -# yield assert_equal, result, True -# -# max_threads = cpu_count() -# -# result = True -# for t in threads: -# if t > max_threads: -# result = False -# break -# -# yield assert_equal, result, True,\ -# "using more threads than system has (threads is not specified by user)" -# -# os.remove(LOG_FILENAME) -# -# +def test_do_not_use_more_memory_then_specified(): + LOG_FILENAME = 'callback.log' + my_logger = logging.getLogger('callback') + my_logger.setLevel(logging.DEBUG) + + # Add the log message handler to the logger + handler = logging.FileHandler(LOG_FILENAME) + my_logger.addHandler(handler) + + max_memory = 1 + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(interface=TestInterfaceSingleNode(), name='n1') + n2 = pe.Node(interface=TestInterfaceSingleNode(), name='n2') + n3 = pe.Node(interface=TestInterfaceSingleNode(), name='n3') + n4 = pe.Node(interface=TestInterfaceSingleNode(), name='n4') + + n1.interface.estimated_memory_gb = 1 + n2.interface.estimated_memory_gb = 1 + n3.interface.estimated_memory_gb = 1 + n4.interface.estimated_memory_gb = 1 + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 1 + + pipe.run(plugin='MultiProc', + plugin_args={'memory': max_memory, + 'status_callback': log_nodes_cb}) + + + nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) + last_node = nodes[-1] + #usage in every second + memory, threads = find_metrics(nodes, last_node) + + result = True + for m in memory: + if m > max_memory: + result = False + break + + yield assert_equal, result, True + + max_threads = cpu_count() + + result = True + for t in threads: + if t > max_threads: + result = False + break + + yield assert_equal, result, True,\ + "using more threads than system has (threads is not specified by user)" + + os.remove(LOG_FILENAME) + + def test_do_not_use_more_threads_then_specified(): LOG_FILENAME = 'callback.log' my_logger = logging.getLogger('callback') From e7eac16155da70b133f6f011f3dd1497cb4de3ed Mon Sep 17 00:00:00 2001 From: dclark87 Date: Mon, 9 May 2016 16:41:39 -0400 Subject: [PATCH 77/78] Fixed some typos --- nipype/utils/draw_gantt_chart.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 700fdad57b..d4fdc88830 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -43,10 +43,10 @@ def create_event_dict(start_time, nodes_list): events = {} for node in nodes_list: # Format node fields - estimated_threads = int(node.get('num_threads'), 1) - estimated_memory_gb = float(node.get('estimated_memory_gb', 1.0)) - runtime_threads = int(node.get('runtime_threads'), 0) - runtime_memory_gb = float(node.get('runtime_memory_gb', 0.0)) + estimated_threads = node.get('num_threads', 1) + estimated_memory_gb = node.get('estimated_memory_gb', 1.0) + runtime_threads = node.get('runtime_threads', 0) + runtime_memory_gb = node.get('runtime_memory_gb', 0.0) # Init and format event-based nodes node['estimated_threads'] = estimated_threads @@ -170,6 +170,9 @@ def calculate_resource_timeseries(events, resource): and the resource amount as values ''' + # Import packages + import pandas as pd + # Init variables res = OrderedDict() all_res = 0.0 @@ -177,11 +180,11 @@ def calculate_resource_timeseries(events, resource): # Iterate through the events for tdelta, event in sorted(events.items()): if event['event'] == "start": - if resource in event and event[resource] != 'Unkown': + if resource in event and event[resource] != 'Unknown': all_res += float(event[resource]) current_time = event['start']; elif event['event'] == "finish": - if resource in event and event[resource] != 'Unkown': + if resource in event and event[resource] != 'Unknown': all_res -= float(event[resource]) current_time = event['finish']; res[current_time] = all_res From e600d57dd4e3ae60d8b50696b59f642ebc018413 Mon Sep 17 00:00:00 2001 From: dclark87 Date: Tue, 10 May 2016 18:47:31 -0400 Subject: [PATCH 78/78] Added missing mark --- doc/users/resource_sched_profiler.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index 0382fa6624..99d1a7064c 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -151,10 +151,10 @@ The pandas_ Python package is required to use this feature. generate_gantt_chart('/home/user/run_stats.log', cores=8) # ...creates gantt chart in '/home/user/run_stats.log.html' -The `generate_gantt_chart`` function will create an html file that can be viewed +The ``generate_gantt_chart`` function will create an html file that can be viewed in a browser. Below is an example of the gantt chart displayed in a web browser. Note that when the cursor is hovered over any particular node bubble or resource bubble, some additional information is shown in a pop-up. * - .. image:: images/gantt_chart.png - :width: 100 % \ No newline at end of file + :width: 100 %