
"""Utils for Updating state/progress and results to WebServices


"""
import json
import logging
import pprint
import time
import datetime
import pytz

import requests
from requests import RequestException

from pbcommand.models import (FileTypes,
                              DataSetFileType,
                              DataStore,
                              DataStoreFile)
from pbcommand.utils import get_dataset_metadata, to_ascii

from .models import (SMRTServiceBaseError,
                     JobResult, JobStates, JobExeError, JobTypes,
                     ServiceResourceTypes, ServiceJob, JobEntryPoint,
                     JobTask)

from .utils import to_sal_summary

log = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# log.addHandler(logging.NullHandler())  # to prevent the annoying 'No handlers .. ' msg

# Everything else is considered non-public
__all__ = ['ServiceAccessLayer']


class Constants(object):
    HEADERS = {'Content-type': 'application/json'}


def __jsonable_request(request_method, headers):
    def wrapper(url, d_):
        data = json.dumps(d_)
        return request_method(url, data=data, headers=headers)
    return wrapper


def _post_requests(headers):
    return __jsonable_request(requests.post, headers)


def _put_requests(headers):
    return __jsonable_request(requests.put, headers)


def _get_requests(headers):
    def wrapper(url):
        return requests.get(url, headers=headers)
    return wrapper

# These are exposed publicly as a utility, but shouldn't be used in any API
# call. The _process_* functions are the entry points for API calls to make
# sure errors are handled correctly.
rqpost = _post_requests(Constants.HEADERS)
rqput = _put_requests(Constants.HEADERS)
rqget = _get_requests(Constants.HEADERS)
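

# Illustrative sketch (hypothetical host/port and payload), not called anywhere
# in this module: the rq* helpers close over the JSON Content-type header, and
# rqpost/rqput serialize the dict payload to JSON before sending.
def _example_raw_request_usage():
    base = "http://my-smrtlink-host:8081"  # hypothetical server
    status_response = rqget(base + "/status")
    logger_response = rqpost(base + "/smrt-base/loggers",
                             dict(id="my_logger", name="My Logger", description="example"))
    return status_response, logger_response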


def _parse_base_service_error(response):
    """:type response: requests.Response

    Don't trust the services. Try to parse the response into the SMRT Server
    Error data structure (even if a 200 is returned).
    """
    if response.ok:
        try:
            d = response.json()
            emsg = SMRTServiceBaseError.from_d(d)
            raise emsg
        except (KeyError, TypeError):
            # couldn't parse the response into an error,
            # so everything is fine
            return response
    else:
        return response


def _process_rget(total_url, ignore_errors=False):
    """Process get request and return JSON response. Raise if not successful"""
    r = rqget(total_url)
    _parse_base_service_error(r)
    if not r.ok and not ignore_errors:
        log.error("Failed ({s}) GET to {x}".format(x=total_url, s=r.status_code))
    r.raise_for_status()
    j = r.json()
    return j


def _process_rget_with_transform(func, ignore_errors=False):
    """Post process the JSON result (if successful) with F(json_d) -> T"""
    def wrapper(total_url):
        j = _process_rget(total_url, ignore_errors=ignore_errors)
        return func(j)
    return wrapper


def _process_rget_with_jobs_transform(total_url, ignore_errors=False):
    # defining an internal method, because this is used in several places
    jobs_d = _process_rget(total_url, ignore_errors=ignore_errors)
    # Sort by Id desc so newer jobs show up first
    jobs = [ServiceJob.from_d(job_d) for job_d in jobs_d]
    return sorted(jobs, key=lambda x: x.id, reverse=True)


def _process_rget_or_none(func, ignore_errors=False):
    """
    apply the transform func to the output of the GET request if it was
    successful, else return None

    This is intended to be used for looking up Results by Id where a 404
    may be returned.
    """
    def wrapper(total_url):
        try:
            return _process_rget_with_transform(func, ignore_errors)(total_url)
        except (RequestException, SMRTServiceBaseError):
            # FIXME
            # this should be a tighter exception case
            # only look for 404
            return None

    return wrapper


def _process_rget_with_job_transform_or_none(total_url):
    return _process_rget_or_none(ServiceJob.from_d)(total_url)
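

# Illustrative sketch (hypothetical URL), not called anywhere in this module:
# _process_rget_with_transform composes a GET request with a post-processing
# function applied to the parsed JSON body.
def _example_transform_usage():
    url = "http://my-smrtlink-host:8081/secondary-analysis/job-manager/jobs/pbsmrtpipe"  # hypothetical
    get_job_names = _process_rget_with_transform(lambda jobs_d: [j.get("name") for j in jobs_d])
    return get_job_names(url)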


def __process_creatable_to_json(f):
    def wrapper(total_url, payload_d):
        r = f(total_url, payload_d)
        _parse_base_service_error(r)
        # FIXME This should be strict to only return a 201
        if r.status_code not in (200, 201, 202, 204):
            log.error("Failed ({s} to call {u}".format(u=total_url, s=r.status_code))
            log.error("payload")
            log.error("\n" + pprint.pformat(payload_d))
        r.raise_for_status()
        j = r.json()
        return j
    return wrapper

_process_rpost = __process_creatable_to_json(rqpost)
_process_rput = __process_creatable_to_json(rqput)


def _process_rpost_with_transform(func):
    def wrapper(total_url, payload_d):
        j = _process_rpost(total_url, payload_d)
        return func(j)
    return wrapper


def _process_rput_with_transform(func):
    def wrapper(total_url, payload_d):
        j = _process_rput(total_url, payload_d)
        return func(j)
    return wrapper


def _to_url(base, ext):
    return "".join([base, ext])


def _null_func(x):
    # Pass-through func
    return x


def _transform_job_tasks(j):
    return [JobTask.from_d(d) for d in j]


def _import_dataset_by_type(dataset_type_or_id):

    if isinstance(dataset_type_or_id, DataSetFileType):
        ds_type_id = dataset_type_or_id.file_type_id
    else:
        ds_type_id = dataset_type_or_id

    def wrapper(total_url, path):
        _d = dict(datasetType=ds_type_id, path=path)
        return _process_rpost_with_transform(ServiceJob.from_d)(total_url, _d)

    return wrapper
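

# Illustrative sketch (hypothetical host and path), not called anywhere in this
# module: _import_dataset_by_type returns a callable that POSTs
# {"datasetType": ..., "path": ...} to the import-dataset job endpoint and
# converts the response to a ServiceJob.
def _example_import_dataset_by_type():
    url = "http://my-smrtlink-host:8081/secondary-analysis/job-manager/jobs/{t}".format(t=JobTypes.IMPORT_DS)
    importer = _import_dataset_by_type(FileTypes.DS_SUBREADS)
    return importer(url, "/path/to/subreadset.xml")  # hypothetical path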


def _get_job_by_id_or_raise(sal, job_id, error_klass, error_message_extras=None):
    job = sal.get_job_by_id(job_id)

    if job is None:
        details = "" if error_messge_extras is None else error_messge_extras
        base_msg = "Failed to find job {i}".format(i=job_id)
        emsg = " ".join([base_msg, details])
        raise error_klass(emsg)

    return job


def _block_for_job_to_complete(sal, job_id, time_out=600, sleep_time=2):
    """
    Waits for job to complete

    :param sal: ServiceAccessLayer
    :param job_id: Job Id
    :param time_out: Total runtime before aborting
    :param sleep_time: polling interval (in sec)

    :rtype: JobResult
    :raises: KeyError if job is not initially found, or JobExeError
    if the job fails during the polling process or times out
    """

    time.sleep(sleep_time)
    job = _get_job_by_id_or_raise(sal, job_id, KeyError)

    log.debug("time_out = {t}".format(t=time_out))

    error_msg = ""
    job_result = JobResult(job, 0, error_msg)
    started_at = time.time()

    # number of polling steps
    i = 0
    while True:
        run_time = time.time() - started_at

        if job.state in JobStates.ALL_COMPLETED:
            break

        i += 1
        time.sleep(sleep_time)

        msg = "Running pipeline {n} state: {s} runtime:{r:.2f} sec {i} iteration".format(n=job.name, s=job.state, r=run_time, i=i)
        log.debug(msg)
        # making the exceptions different to distinguish between an initial
        # error and a "polling" error. Adding some msg details
        job = _get_job_by_id_or_raise(sal, job_id, JobExeError, error_message_extras=msg)

        # FIXME, there's currently not a good way to get errors for jobs
        job_result = JobResult(job, run_time, "")
        if time_out is not None:
            if run_time > time_out:
                raise JobExeError("Exceeded runtime {r} of {t}. {m}".format(r=run_time, t=time_out, m=msg))

    return job_result
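

# Illustrative sketch (hypothetical job id), not called anywhere in this module:
# typical blocking poll for a job. 'sal' is an already constructed
# ServiceAccessLayer instance.
def _example_block_for_job(sal, job_id=1234):
    return _block_for_job_to_complete(sal, job_id, time_out=600, sleep_time=2)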

# Make this consistent somehow. Maybe define a 'shortname' in the core model?
# Martin is doing this for the XML file names
DATASET_METATYPES_TO_ENDPOINTS = {
    FileTypes.DS_SUBREADS_H5: "hdfsubreads",
    FileTypes.DS_SUBREADS: "subreads",
    FileTypes.DS_ALIGN: "alignments",
    FileTypes.DS_REF: "references",
    FileTypes.DS_BARCODE: "barcodes",
    FileTypes.DS_CCS: "ccsreads",
    FileTypes.DS_CONTIG: "contigs",
    FileTypes.DS_ALIGN_CCS: "cssalignments",
    FileTypes.DS_GMAP_REF: "gmapreferences"}


def _get_endpoint_or_raise(ds_type):
    if ds_type in DATASET_METATYPES_TO_ENDPOINTS:
        return DATASET_METATYPES_TO_ENDPOINTS[ds_type]
    raise KeyError("Unsupported datasettype {t}. Supported values {v}".format(t=ds_type, v=DATASET_METATYPES_TO_ENDPOINTS.keys()))


def _job_id_or_error(job_or_error, custom_err_msg=None):
    """
    Extract job id from the job creation service response (by type)
    or raise an exception from an EngineJob response

    :raises: JobExeError
    """
    if isinstance(job_or_error, ServiceJob):
        return job_or_error.id
    else:
        emsg = job_or_error.get('message', "Unknown")
        if custom_err_msg is not None:
            emsg += " {f}".format(f=custom_err_msg)
        raise JobExeError("Failed to create job. {e}. Raw Response {x}".format(e=emsg, x=job_or_error))


def _to_host(h):
    prefix = "http://"
    return h if h.startswith(prefix) else prefix + h


def _to_ds_file(d):
    # is_chunked isn't exposed at the service level
    return DataStoreFile(d['uuid'], d['sourceId'], d['fileTypeId'], d['path'], is_chunked=False, name=d.get("name", ""), description=d.get("description", ""))


def _to_datastore(dx):
    # Friction to get around service endpoint not returning a list of files
    ds_files = [_to_ds_file(d) for d in dx]
    return DataStore(ds_files)


def _to_job_report_files(dx):
    return [{u"reportTypeId": d["reportTypeId"],
             u"dataStoreFile": _to_ds_file(d["dataStoreFile"])} for d in dx]


def _to_entry_points(d):
    return [JobEntryPoint.from_d(i) for i in d]


def _get_all_report_attributes(sal_get_reports_func, sal_get_reports_details_func, job_id):
    """Util func for getting report Attributes

    Note, this assumes that only one report type has been created. This is
    probably not a great idea. Should re-evaluate this.
    """
    report_datafiles = sal_get_reports_func(job_id)
    # each report entry is a dict with "reportTypeId" and "dataStoreFile" keys
    # (see _to_job_report_files); pull the uuid from the DataStoreFile
    report_uuids = [r["dataStoreFile"].uuid for r in report_datafiles]
    reports = [sal_get_reports_details_func(job_id, r_uuid) for r_uuid in report_uuids]
    all_report_attributes = {}

    for r in reports:
        for x in r['attributes']:
            all_report_attributes[x['id']] = x['value']

    return all_report_attributes


def _to_relative_tasks_url(job_type):
    def wrapper(job_id_or_uuid):
        return "/".join([ServiceAccessLayer.ROOT_JOBS, job_type, str(job_id_or_uuid), "tasks"])
    return wrapper


class ServiceAccessLayer(object):
    """General Client Access Layer for interfacing with the job types on
    SMRT Link Analysis Services"""

    ROOT_JM = "/secondary-analysis/job-manager"
    ROOT_JOBS = ROOT_JM + "/jobs"
    ROOT_DS = "/secondary-analysis/datasets"
    ROOT_PT = '/secondary-analysis/resolved-pipeline-templates'

    # in sec when blocking to run a job
    JOB_DEFAULT_TIMEOUT = 60 * 30

    def __init__(self, base_url, port, debug=False, sleep_time=2):
        """
        :param base_url: base url of the SL Server (e.g., smrtlink-alpha, or http://smrtlink-alpha)
        :param port: port of the SL server
        :param debug: set improved debugging output on Services request failures
        :param sleep_time: sleep time (in seconds) between polling for job status
        """
        self.base_url = _to_host(base_url)
        self.port = port
        # This will display verbose details with respect to the failed request
        self.debug = debug
        self._sleep_time = sleep_time

    @property
    def uri(self):
        return "{b}:{u}".format(b=self.base_url, u=self.port)

    def _to_url(self, rest):
        return _to_url(self.uri, rest)

    def __repr__(self):
        return "<{k} {u} >".format(k=self.__class__.__name__, u=self.uri)
    def to_summary(self):
        """Returns a summary of System status, DataSets, and Jobs in the system

        :rtype: str
        """
        return to_sal_summary(self)

    def get_status(self):
        """Get status of the server

        :rtype: dict
        """
        # This should be converted to a concrete typed object
        return _process_rget(_to_url(self.uri, "/status"))

    def get_job_by_type_and_id(self, job_type, job_id):
        return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}".format(i=job_id, t=job_type, p=ServiceAccessLayer.ROOT_JOBS)))
    def get_job_by_id(self, job_id):
        """Get a Job by int id"""
        # FIXME. Make this an internal method. It's ambiguous which job type you're asking for.
        return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{r}/{i}".format(i=job_id, r=ServiceAccessLayer.ROOT_JOBS)))
    def _get_job_resource_type(self, job_type, job_id, resource_type_id):
        # grab the datastore or the reports
        _d = dict(t=job_type, i=job_id, r=resource_type_id, p=ServiceAccessLayer.ROOT_JOBS)
        return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}/{r}".format(**_d)))

    def _get_job_resource_type_with_transform(self, job_type, job_id, resource_type_id, transform_func):
        _d = dict(t=job_type, i=job_id, r=resource_type_id, p=ServiceAccessLayer.ROOT_JOBS)
        return _process_rget_or_none(transform_func)(_to_url(self.uri, "{p}/{t}/{i}/{r}".format(**_d)))

    def _get_jobs_by_job_type(self, job_type):
        return _process_rget_with_jobs_transform(_to_url(self.uri, "{p}/{t}".format(t=job_type, p=ServiceAccessLayer.ROOT_JOBS)))

    def get_analysis_jobs(self):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.PB_PIPE)

    def get_import_dataset_jobs(self):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.IMPORT_DS)

    def get_merge_dataset_jobs(self):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.MERGE_DS)
    def get_fasta_convert_jobs(self):
        """:rtype: list[ServiceJob]"""
        return self._get_jobs_by_job_type(JobTypes.CONVERT_FASTA)
    def get_analysis_job_by_id(self, job_id):
        """Get an Analysis job by id or UUID or return None

        :rtype: ServiceJob
        """
        return self.get_job_by_type_and_id(JobTypes.PB_PIPE, job_id)

    def get_import_job_by_id(self, job_id):
        return self.get_job_by_type_and_id(JobTypes.IMPORT_DS, job_id)

    def get_analysis_job_datastore(self, job_id):
        """Get DataStore output from (pbsmrtpipe) analysis job"""
        # this doesn't work the list is sli
        return self._get_job_resource_type_with_transform(JobTypes.PB_PIPE, job_id, ServiceResourceTypes.DATASTORE, _to_datastore)

    def get_analysis_job_reports(self, job_id):
        """Get list of DataStore ReportFile types output from (pbsmrtpipe) analysis job"""
        return self._get_job_resource_type_with_transform(JobTypes.PB_PIPE, job_id, ServiceResourceTypes.REPORTS, _to_job_report_files)

    def get_analysis_job_report_details(self, job_id, report_uuid):
        _d = dict(t=JobTypes.PB_PIPE, i=job_id, r=ServiceResourceTypes.REPORTS, p=ServiceAccessLayer.ROOT_JOBS, u=report_uuid)
        return _process_rget_or_none(lambda x: x)(_to_url(self.uri, "{p}/{t}/{i}/{r}/{u}".format(**_d)))

    def get_analysis_job_report_attrs(self, job_id):
        """Return a dict of all the Report Attributes"""
        return _get_all_report_attributes(self.get_analysis_job_reports, self.get_analysis_job_report_details, job_id)

    def get_import_job_reports(self, job_id):
        return self._get_job_resource_type_with_transform(JobTypes.IMPORT_DS, job_id, ServiceResourceTypes.REPORTS, _to_job_report_files)

    def get_import_job_report_details(self, job_id, report_uuid):
        # It would have been better to return a Report instance, not raw json
        _d = dict(t=JobTypes.IMPORT_DS, i=job_id, r=ServiceResourceTypes.REPORTS, p=ServiceAccessLayer.ROOT_JOBS, u=report_uuid)
        return _process_rget_or_none(lambda x: x)(_to_url(self.uri, "{p}/{t}/{i}/{r}/{u}".format(**_d)))

    def get_import_job_report_attrs(self, job_id):
        """Return a dict of all the Report Attributes"""
        return _get_all_report_attributes(self.get_import_job_reports, self.get_import_job_report_details, job_id)

    def get_analysis_job_entry_points(self, job_id):
        return self._get_job_resource_type_with_transform(JobTypes.PB_PIPE, job_id, ServiceResourceTypes.ENTRY_POINTS, _to_entry_points)

    def get_import_dataset_job_datastore(self, job_id):
        """Get a List of Service DataStore files from an import DataSet job"""
        return self._get_job_resource_type(JobTypes.IMPORT_DS, job_id, ServiceResourceTypes.DATASTORE)

    def get_merge_dataset_job_datastore(self, job_id):
        return self._get_job_resource_type(JobTypes.MERGE_DS, job_id, ServiceResourceTypes.DATASTORE)

    def _import_dataset(self, dataset_type, path):
        # This returns a job resource
        url = self._to_url("{p}/{x}".format(x=JobTypes.IMPORT_DS, p=ServiceAccessLayer.ROOT_JOBS))
        return _import_dataset_by_type(dataset_type)(url, path)

    def run_import_dataset_by_type(self, dataset_type, path_to_xml):
        job_or_error = self._import_dataset(dataset_type, path_to_xml)
        custom_err_msg = "Import {d} {p}".format(p=path_to_xml, d=dataset_type)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self, job_id, sleep_time=self._sleep_time)
    def _run_import_and_block(self, func, path, time_out=None):
        # func will be self.import_dataset_X
        job_or_error = func(path)
        custom_err_msg = "Import {p}".format(p=path)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self, job_id, time_out=time_out, sleep_time=self._sleep_time)
    def import_dataset_subread(self, path):
        return self._import_dataset(FileTypes.DS_SUBREADS, path)

    def run_import_dataset_subread(self, path, time_out=10):
        return self._run_import_and_block(self.import_dataset_subread, path, time_out=time_out)

    def import_dataset_hdfsubread(self, path):
        return self._import_dataset(FileTypes.DS_SUBREADS_H5, path)

    def run_import_dataset_hdfsubread(self, path, time_out=10):
        return self._run_import_and_block(self.import_dataset_hdfsubread, path, time_out=time_out)

    def import_dataset_reference(self, path):
        return self._import_dataset(FileTypes.DS_REF, path)

    def run_import_dataset_reference(self, path, time_out=10):
        return self._run_import_and_block(self.import_dataset_reference, path, time_out=time_out)

    def import_dataset_barcode(self, path):
        return self._import_dataset(FileTypes.DS_BARCODE, path)

    def run_import_dataset_barcode(self, path, time_out=10):
        return self._run_import_and_block(self.import_dataset_barcode, path, time_out=time_out)
    def run_import_local_dataset(self, path):
        """Import a file from FS that is local to where the services are running

        Returns a JobResult instance

        :rtype: JobResult
        """
        dataset_meta_type = get_dataset_metadata(path)

        def _verify_dataset_in_list():
            file_type = FileTypes.ALL()[dataset_meta_type.metatype]
            ds_endpoint = _get_endpoint_or_raise(file_type)
            # all datasets for a specific type
            datasets = self._get_datasets_by_type(ds_endpoint)
            uuids = {ds['uuid'] for ds in datasets}
            if dataset_meta_type.uuid not in uuids:
                raise JobExeError(("Dataset {u} was imported but does not "
                                   "appear in the dataset list; this may "
                                   "indicate XML schema errors.").format(u=dataset_meta_type.uuid))

        result = self.get_dataset_by_uuid(dataset_meta_type.uuid, ignore_errors=True)
        if result is None:
            log.info("Importing dataset {p}".format(p=path))
            job_result = self.run_import_dataset_by_type(dataset_meta_type.metatype, path)
            log.info("Confirming database update")
            # validation 1: attempt to retrieve dataset info
            result_new = self.get_dataset_by_uuid(dataset_meta_type.uuid)
            if result_new is None:
                raise JobExeError(("Dataset {u} was imported but could "
                                   "not be retrieved; this may indicate "
                                   "XML schema errors.").format(u=dataset_meta_type.uuid))
            # validation 2: make sure it shows up in the listing
            _verify_dataset_in_list()
            return job_result
        else:
            log.info("{f} already imported. Skipping importing. {r}".format(r=result, f=dataset_meta_type.metatype))
            _verify_dataset_in_list()
            # need to clean this up
            return JobResult(self.get_job_by_id(result['jobId']), 0, "")
    def get_dataset_by_uuid(self, int_or_uuid, ignore_errors=False):
        """The recommended model is to look up DataSet type by explicit MetaType

        Returns None if the dataset was not found
        """
        return _process_rget_or_none(_null_func, ignore_errors=ignore_errors)(
            _to_url(self.uri, "{p}/{i}".format(i=int_or_uuid, p=ServiceAccessLayer.ROOT_DS)))
    def get_dataset_by_id(self, dataset_type, int_or_uuid):
        """Get a Dataset using the DataSetMetaType and (int|uuid) of the dataset"""
        ds_endpoint = _get_endpoint_or_raise(dataset_type)
        return _process_rget(_to_url(self.uri, "{p}/{t}/{i}".format(t=ds_endpoint, i=int_or_uuid, p=ServiceAccessLayer.ROOT_DS)))

    def _get_datasets_by_type(self, dstype):
        return _process_rget(_to_url(self.uri, "{p}/{i}".format(i=dstype, p=ServiceAccessLayer.ROOT_DS)))

    def get_subreadset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_SUBREADS, int_or_uuid)

    def get_subreadsets(self):
        return self._get_datasets_by_type("subreads")

    def get_hdfsubreadset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_SUBREADS_H5, int_or_uuid)

    def get_hdfsubreadsets(self):
        return self._get_datasets_by_type("hdfsubreads")

    def get_referenceset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_REF, int_or_uuid)

    def get_referencesets(self):
        return self._get_datasets_by_type("references")

    def get_barcodeset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_BARCODE, int_or_uuid)

    def get_barcodesets(self):
        return self._get_datasets_by_type("barcodes")

    def get_alignmentset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_ALIGN, int_or_uuid)

    def get_ccsreadset_by_id(self, int_or_uuid):
        return self.get_dataset_by_id(FileTypes.DS_CCS, int_or_uuid)

    def get_ccsreadsets(self):
        return self._get_datasets_by_type("ccsreads")

    def get_alignmentsets(self):
        return self._get_datasets_by_type("alignments")

    def import_fasta(self, fasta_path, name, organism, ploidy):
        """Convert fasta file to a ReferenceSet and Import. Returns a Job"""
        d = dict(path=fasta_path, name=name, organism=organism, ploidy=ploidy)
        return _process_rpost_with_transform(ServiceJob.from_d)(self._to_url("{p}/{t}".format(p=ServiceAccessLayer.ROOT_JOBS, t=JobTypes.CONVERT_FASTA)), d)
    def run_import_fasta(self, fasta_path, name, organism, ploidy, time_out=JOB_DEFAULT_TIMEOUT):
        """Import a Reference (fasta convert job) and block until the job completes"""
        job_or_error = self.import_fasta(fasta_path, name, organism, ploidy)
        _d = dict(f=fasta_path, n=name, o=organism, p=ploidy)
        custom_err_msg = "Fasta-convert path:{f} name:{n} organism:{o} ploidy:{p}".format(**_d)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self, job_id, time_out=time_out, sleep_time=self._sleep_time)
    def create_logger_resource(self, idx, name, description):
        _d = dict(id=idx, name=name, description=description)
        return _process_rpost(_to_url(self.uri, "/smrt-base/loggers"), _d)

    def log_progress_update(self, job_type_id, job_id, message, level, source_id):
        """This is the generic job logging mechanism"""
        _d = dict(message=message, level=level, sourceId=source_id)
        return _process_rpost(_to_url(self.uri, "{p}/{t}/{i}/log".format(t=job_type_id, i=job_id, p=ServiceAccessLayer.ROOT_JOBS)), _d)

    def get_pipeline_template_by_id(self, pipeline_template_id):
        return _process_rget(_to_url(self.uri, "{p}/{i}".format(i=pipeline_template_id, p=ServiceAccessLayer.ROOT_PT)))

    def create_by_pipeline_template_id(self, name, pipeline_template_id, epoints, task_options=()):
        """Creates and runs a pbsmrtpipe pipeline by pipeline template id"""
        # sanity checking to see if pipeline is valid
        _ = self.get_pipeline_template_by_id(pipeline_template_id)

        seps = [dict(entryId=e.entry_id, fileTypeId=e.dataset_type, datasetId=e.resource) for e in epoints]

        def _to_o(opt_id, opt_value, option_type_id):
            return dict(optionId=opt_id, value=opt_value, optionTypeId=option_type_id)

        task_options = list(task_options)
        # FIXME. Need to define this in the scenario IO layer.
        # workflow_options = [_to_o("woption_01", "value_01")]
        workflow_options = []
        d = dict(name=name, pipelineId=pipeline_template_id, entryPoints=seps, taskOptions=task_options, workflowOptions=workflow_options)
        raw_d = _process_rpost(_to_url(self.uri, "{r}/{p}".format(p=JobTypes.PB_PIPE, r=ServiceAccessLayer.ROOT_JOBS)), d)
        return ServiceJob.from_d(raw_d)

    def run_by_pipeline_template_id(self, name, pipeline_template_id, epoints, task_options=(), time_out=JOB_DEFAULT_TIMEOUT):
        """Blocks and runs a job with a timeout"""
        job_or_error = self.create_by_pipeline_template_id(name, pipeline_template_id, epoints, task_options=task_options)
        _d = dict(name=name, p=pipeline_template_id, eps=epoints)
        custom_err_msg = "Job {n} args: {a}".format(n=name, a=_d)
        job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
        return _block_for_job_to_complete(self, job_id, time_out=time_out, sleep_time=self._sleep_time)

    def get_analysis_job_tasks(self, job_id_or_uuid):
        """Get all the Tasks associated with a Job by UUID or Int Id"""
        job_url = self._to_url(_to_relative_tasks_url(JobTypes.PB_PIPE)(job_id_or_uuid))
        return _process_rget_with_transform(_transform_job_tasks)(job_url)

    def get_import_job_tasks(self, job_id_or_uuid):
        # this is more for testing purposes
        job_url = self._to_url(_to_relative_tasks_url(JobTypes.IMPORT_DS)(job_id_or_uuid))
        return _process_rget_with_transform(_transform_job_tasks)(job_url)
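

# Illustrative sketch (hypothetical host, port and path), not called anywhere
# in this module: typical end-to-end use of ServiceAccessLayer.
def _example_service_access_layer_usage():
    sal = ServiceAccessLayer("smrtlink-host", 8081)  # hypothetical server
    log.info(sal.get_status())
    # import a local SubreadSet XML and block until the import job completes
    job_result = sal.run_import_local_dataset("/path/to/subreadset.xml")  # hypothetical path
    # newest analysis jobs are returned first
    return sal.get_analysis_jobs(), job_result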

def __run_and_ignore_errors(f, warn_message):
    """
    Black hole ignoring exceptions from a func with no args and logging the
    error as a warning.
    """
    try:
        return f()
    except Exception as e:
        log.warn(warn_message + " {e}".format(e=e))


def _run_func(f, warn_message, ignore_errors=True):
    if ignore_errors:
        return __run_and_ignore_errors(f, warn_message)
    else:
        return f()


def log_pbsmrtpipe_progress(total_url, message, level, source_id, ignore_errors=True):
    """Log the status of a pbsmrtpipe job to SMRT Server"""
    # Keeping this as public to avoid breaking pbsmrtpipe. The
    # new public interface should be the JobServiceClient

    # Need to clarify the model here. Trying to pass the most minimal
    # data necessary to pbsmrtpipe.
    _d = dict(message=message, level=level, sourceId=source_id)
    warn_message = "Failed Request to {u} data: {d}".format(u=total_url, d=_d)

    def f():
        return _process_rpost(total_url, _d)

    return _run_func(f, warn_message, ignore_errors=ignore_errors)


def add_datastore_file(total_url, datastore_file, ignore_errors=True):
    """Add datastore to SMRT Server

    :type datastore_file: DataStoreFile
    """
    # Keeping this as public to avoid breaking pbsmrtpipe. The
    # new public interface should be the JobServiceClient
    _d = datastore_file.to_dict()
    warn_message = "Failed Request to {u} data: {d}.".format(u=total_url, d=_d)

    def f():
        return _process_rpost(total_url, _d)

    return _run_func(f, warn_message, ignore_errors=ignore_errors)


def _create_job_task(job_tasks_url, create_job_task_record, ignore_errors=True):
    """
    :type create_job_task_record: CreateJobTaskRecord
    :rtype: JobTask
    """
    warn_message = "Unable to create Task {c}".format(c=repr(create_job_task_record))

    def f():
        return _process_rpost_with_transform(JobTask.from_d)(job_tasks_url, create_job_task_record.to_dict())

    return _run_func(f, warn_message, ignore_errors)


def _update_job_task_state(task_url, update_job_task_record, ignore_errors=True):
    """
    :type update_job_task_record: UpdateJobTaskRecord
    :rtype: JobTask
    """
    warn_message = "Unable to update Task {c}".format(c=repr(update_job_task_record))

    def f():
        return _process_rput_with_transform(JobTask.from_d)(task_url, update_job_task_record.to_dict())

    return _run_func(f, warn_message, ignore_errors)


class CreateJobTaskRecord(object):

    def __init__(self, task_uuid, task_id, task_type_id, name, state, created_at=None):
        self.task_uuid = task_uuid
        self.task_id = task_id
        self.task_type_id = task_type_id
        self.name = name
        # this must be consistent with the EngineJob states in the scala code
        self.state = state
        # Note, the created_at timestamp must have the form
        # 2016-02-18T23:24:46.569Z
        # or
        # 2016-02-18T15:24:46.569-08:00
        self.created_at = datetime.datetime.now(pytz.utc) if created_at is None else created_at

    def __repr__(self):
        _d = dict(k=self.__class__.__name__,
                  u=self.task_uuid,
                  i=self.task_id,
                  n=self.name,
                  s=self.state)
        return "<{k} uuid:{u} ix:{i} state:{s} name:{n} >".format(**_d)

    def to_dict(self):
        return dict(uuid=self.task_uuid,
                    taskId=self.task_id,
                    taskTypeId=self.task_type_id,
                    name=self.name,
                    state=self.state,
                    createdAt=self.created_at.isoformat())


class UpdateJobTaskRecord(object):

    def __init__(self, task_uuid, state, message, error_message=None):
        """:type error_message: str | None"""
        self.task_uuid = task_uuid
        self.state = state
        self.message = message
        # detailed error message (e.g., terse stack trace)
        self.error_message = error_message

    @staticmethod
    def from_error(task_uuid, state, message, error_message):
        # require a detailed error message
        return UpdateJobTaskRecord(task_uuid, state, message,
                                   error_message=error_message)

    def __repr__(self):
        _d = dict(k=self.__class__.__name__,
                  i=self.task_uuid,
                  s=self.state)
        return "<{k} i:{i} state:{s} >".format(**_d)

    def to_dict(self):
        _d = dict(uuid=self.task_uuid,
                  state=self.state,
                  message=self.message)
        # spray API is a little odd here. it will complain about
        # Expected String as JsString, but got null
        # even though the model is Option[String]
        if self.error_message is not None:
            _d['errorMessage'] = self.error_message
        return _d


class JobServiceClient(object):
    # Keeping this class private. It should only be used from pbsmrtpipe

    def __init__(self, job_root_url, ignore_errors=False):
        """
        :param job_root_url: Full Root URL to the job
        :type job_root_url: str

        :param ignore_errors: Only log errors, don't raise if a request fails.
        This is intended to be used in a fire-and-forget use case
        :type ignore_errors: bool

        This hides the root location of the URL and hides the job id
        (as an int or uuid)

        The Url has the form:

        http://localhost:8888/secondary-analysis/job-manager/jobs/pbsmrtpipe/1234

        or

        http://localhost:8888/secondary-analysis/job-manager/jobs/pbsmrtpipe/5d562c74-e452-11e6-8b96-3c15c2cc8f88
        """
        self.job_root_url = job_root_url
        self.ignore_errors = ignore_errors

    def __repr__(self):
        _d = dict(k=self.__class__.__name__, u=self.job_root_url)
        return "<{k} Job URL:{u} >".format(**_d)

    def to_url(self, segment):
        return "{i}/{s}".format(i=self.job_root_url, s=segment)

    @property
    def log_url(self):
        return self.to_url("log")

    @property
    def datastore_url(self):
        return self.to_url("datastore")

    @property
    def tasks_url(self):
        return self.to_url("tasks")

    def get_task_url(self, task_uuid):
        """
        :param task_uuid: Task UUID
        :return: full URL for the task resource
        """
        return self.to_url("tasks/{t}".format(t=task_uuid))

    def log_workflow_progress(self, message, level, source_id, ignore_errors=True):
        return log_pbsmrtpipe_progress(self.log_url, message, level, source_id, ignore_errors=ignore_errors)

    def add_datastore_file(self, datastore_file, ignore_errors=True):
        return add_datastore_file(self.datastore_url, datastore_file, ignore_errors=ignore_errors)

    def create_task(self, task_uuid, task_id, task_type_id, name, created_at=None):
        """
        :param task_uuid: Globally unique task id
        :param task_id: Unique with respect to the job
        :param task_type_id: ToolContract or task id (e.g., pbcommand.tasks.alpha)
        :param name: Display name of task
        :param created_at: time task was created at (will be set to the current time if None)
        """
        r = CreateJobTaskRecord(task_uuid, task_id, task_type_id, name, JobStates.CREATED, created_at=created_at)
        return _create_job_task(self.tasks_url, r)

    def update_task_status(self, task_uuid, state, message, error_message=None):
        task_url = self.get_task_url(task_uuid)
        u = UpdateJobTaskRecord(task_uuid, state, message, error_message=error_message)
        return _update_job_task_state(task_url, u, ignore_errors=self.ignore_errors)

    def update_task_to_failed(self, task_uuid, message, detailed_error_message):
        task_url = self.get_task_url(task_uuid)
        state = JobStates.FAILED
        u = UpdateJobTaskRecord(task_uuid, state, message,
                                error_message=detailed_error_message)
        return _update_job_task_state(task_url, u)
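

# Illustrative sketch (hypothetical job URL and inputs), not called anywhere in
# this module: how a pipeline process is expected to use JobServiceClient to
# report progress and register an output file.
def _example_job_service_client_usage(datastore_file):
    job_url = "http://localhost:8888/secondary-analysis/job-manager/jobs/pbsmrtpipe/1234"
    client = JobServiceClient(job_url, ignore_errors=True)
    # the level value is assumed to be a log level name understood by the server
    client.log_workflow_progress("Starting analysis", "INFO", "pbsmrtpipe::example")
    client.add_datastore_file(datastore_file)
    return client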