diff --git a/Pipfile.lock b/Pipfile.lock
index 96f40de8..06c3fc03 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -48,11 +48,11 @@
         },
         "apscheduler": {
             "hashes": [
-                "sha256:1cab7f2521e107d07127b042155b632b7a1cd5e02c34be5a28ff62f77c900c6a",
-                "sha256:c06cc796d5bb9eb3c4f77727f6223476eb67749e7eea074d1587550702a7fbe3"
+                "sha256:793b2d37c52ece53e34626619e6142e99b20b59a12155f39e1e6932e324f079d",
+                "sha256:82d6d21b2f0343510d07bb35618333a794653439a265635555b12935647b460a"
             ],
             "index": "pypi",
-            "version": "==3.7.0"
+            "version": "==3.8.0"
         },
         "attrs": {
             "hashes": [
@@ -1039,49 +1039,49 @@
         },
         "regex": {
             "hashes": [
-                "sha256:04f6b9749e335bb0d2f68c707f23bb1773c3fb6ecd10edf0f04df12a8920d468",
-                "sha256:08d74bfaa4c7731b8dac0a992c63673a2782758f7cfad34cf9c1b9184f911354",
-                "sha256:0fc1f8f06977c2d4f5e3d3f0d4a08089be783973fc6b6e278bde01f0544ff308",
-                "sha256:121f4b3185feaade3f85f70294aef3f777199e9b5c0c0245c774ae884b110a2d",
-                "sha256:1413b5022ed6ac0d504ba425ef02549a57d0f4276de58e3ab7e82437892704fc",
-                "sha256:1743345e30917e8c574f273f51679c294effba6ad372db1967852f12c76759d8",
-                "sha256:28fc475f560d8f67cc8767b94db4c9440210f6958495aeae70fac8faec631797",
-                "sha256:31a99a4796bf5aefc8351e98507b09e1b09115574f7c9dbb9cf2111f7220d2e2",
-                "sha256:328a1fad67445550b982caa2a2a850da5989fd6595e858f02d04636e7f8b0b13",
-                "sha256:473858730ef6d6ff7f7d5f19452184cd0caa062a20047f6d6f3e135a4648865d",
-                "sha256:4cde065ab33bcaab774d84096fae266d9301d1a2f5519d7bd58fc55274afbf7a",
-                "sha256:5f6a808044faae658f546dd5f525e921de9fa409de7a5570865467f03a626fc0",
-                "sha256:610b690b406653c84b7cb6091facb3033500ee81089867ee7d59e675f9ca2b73",
-                "sha256:66256b6391c057305e5ae9209941ef63c33a476b73772ca967d4a2df70520ec1",
-                "sha256:6eebf512aa90751d5ef6a7c2ac9d60113f32e86e5687326a50d7686e309f66ed",
-                "sha256:79aef6b5cd41feff359acaf98e040844613ff5298d0d19c455b3d9ae0bc8c35a",
-                "sha256:808ee5834e06f57978da3e003ad9d6292de69d2bf6263662a1a8ae30788e080b",
-                "sha256:8e44769068d33e0ea6ccdf4b84d80c5afffe5207aa4d1881a629cf0ef3ec398f",
-                "sha256:999ad08220467b6ad4bd3dd34e65329dd5d0df9b31e47106105e407954965256",
-                "sha256:9b006628fe43aa69259ec04ca258d88ed19b64791693df59c422b607b6ece8bb",
-                "sha256:9d05ad5367c90814099000442b2125535e9d77581855b9bee8780f1b41f2b1a2",
-                "sha256:a577a21de2ef8059b58f79ff76a4da81c45a75fe0bfb09bc8b7bb4293fa18983",
-                "sha256:a617593aeacc7a691cc4af4a4410031654f2909053bd8c8e7db837f179a630eb",
-                "sha256:abb48494d88e8a82601af905143e0de838c776c1241d92021e9256d5515b3645",
-                "sha256:ac88856a8cbccfc14f1b2d0b829af354cc1743cb375e7f04251ae73b2af6adf8",
-                "sha256:b4c220a1fe0d2c622493b0a1fd48f8f991998fb447d3cd368033a4b86cf1127a",
-                "sha256:b844fb09bd9936ed158ff9df0ab601e2045b316b17aa8b931857365ea8586906",
-                "sha256:bdc178caebd0f338d57ae445ef8e9b737ddf8fbc3ea187603f65aec5b041248f",
-                "sha256:c206587c83e795d417ed3adc8453a791f6d36b67c81416676cad053b4104152c",
-                "sha256:c61dcc1cf9fd165127a2853e2c31eb4fb961a4f26b394ac9fe5669c7a6592892",
-                "sha256:c7cb4c512d2d3b0870e00fbbac2f291d4b4bf2634d59a31176a87afe2777c6f0",
-                "sha256:d4a332404baa6665b54e5d283b4262f41f2103c255897084ec8f5487ce7b9e8e",
-                "sha256:d5111d4c843d80202e62b4fdbb4920db1dcee4f9366d6b03294f45ed7b18b42e",
-                "sha256:e1e8406b895aba6caa63d9fd1b6b1700d7e4825f78ccb1e5260551d168db38ed",
-                "sha256:e8690ed94481f219a7a967c118abaf71ccc440f69acd583cab721b90eeedb77c",
-                "sha256:ed283ab3a01d8b53de3a05bfdf4473ae24e43caee7dcb5584e86f3f3e5ab4374",
-                "sha256:ed4b50355b066796dacdd1cf538f2ce57275d001838f9b132fab80b75e8c84dd",
-                "sha256:ee329d0387b5b41a5dddbb6243a21cb7896587a651bebb957e2d2bb8b63c0791",
"sha256:f3bf1bc02bc421047bfec3343729c4bbbea42605bcfd6d6bfe2c07ade8b12d2a", - "sha256:f585cbbeecb35f35609edccb95efd95a3e35824cd7752b586503f7e6087303f1", - "sha256:f60667673ff9c249709160529ab39667d1ae9fd38634e006bec95611f632e759" + "sha256:0628ed7d6334e8f896f882a5c1240de8c4d9b0dd7c7fb8e9f4692f5684b7d656", + "sha256:09eb62654030f39f3ba46bc6726bea464069c29d00a9709e28c9ee9623a8da4a", + "sha256:0bba1f6df4eafe79db2ecf38835c2626dbd47911e0516f6962c806f83e7a99ae", + "sha256:10a7a9cbe30bd90b7d9a1b4749ef20e13a3528e4215a2852be35784b6bd070f0", + "sha256:17310b181902e0bb42b29c700e2c2346b8d81f26e900b1328f642e225c88bce1", + "sha256:1e8d1898d4fb817120a5f684363b30108d7b0b46c7261264b100d14ec90a70e7", + "sha256:2054dea683f1bda3a804fcfdb0c1c74821acb968093d0be16233873190d459e3", + "sha256:29385c4dbb3f8b3a55ce13de6a97a3d21bd00de66acd7cdfc0b49cb2f08c906c", + "sha256:295bc8a13554a25ad31e44c4bedabd3c3e28bba027e4feeb9bb157647a2344a7", + "sha256:2cdb3789736f91d0b3333ac54d12a7e4f9efbc98f53cb905d3496259a893a8b3", + "sha256:3baf3eaa41044d4ced2463fd5d23bf7bd4b03d68739c6c99a59ce1f95599a673", + "sha256:4e61100200fa6ab7c99b61476f9f9653962ae71b931391d0264acfb4d9527d9c", + "sha256:6266fde576e12357b25096351aac2b4b880b0066263e7bc7a9a1b4307991bb0e", + "sha256:650c4f1fc4273f4e783e1d8e8b51a3e2311c2488ba0fcae6425b1e2c248a189d", + "sha256:658e3477676009083422042c4bac2bdad77b696e932a3de001c42cc046f8eda2", + "sha256:6adc1bd68f81968c9d249aab8c09cdc2cbe384bf2d2cb7f190f56875000cdc72", + "sha256:6c4d83d21d23dd854ffbc8154cf293f4e43ba630aa9bd2539c899343d7f59da3", + "sha256:6f74b6d8f59f3cfb8237e25c532b11f794b96f5c89a6f4a25857d85f84fbef11", + "sha256:7783d89bd5413d183a38761fbc68279b984b9afcfbb39fa89d91f63763fbfb90", + "sha256:7e3536f305f42ad6d31fc86636c54c7dafce8d634e56fef790fbacb59d499dd5", + "sha256:821e10b73e0898544807a0692a276e539e5bafe0a055506a6882814b6a02c3ec", + "sha256:835962f432bce92dc9bf22903d46c50003c8d11b1dc64084c8fae63bca98564a", + "sha256:85c61bee5957e2d7be390392feac7e1d7abd3a49cbaed0c8cee1541b784c8561", + "sha256:86f9931eb92e521809d4b64ec8514f18faa8e11e97d6c2d1afa1bcf6c20a8eab", + "sha256:8a5c2250c0a74428fd5507ae8853706fdde0f23bfb62ee1ec9418eeacf216078", + "sha256:8aec4b4da165c4a64ea80443c16e49e3b15df0f56c124ac5f2f8708a65a0eddc", + "sha256:8c268e78d175798cd71d29114b0a1f1391c7d011995267d3b62319ec1a4ecaa1", + "sha256:8d80087320632457aefc73f686f66139801959bf5b066b4419b92be85be3543c", + "sha256:95e89a8558c8c48626dcffdf9c8abac26b7c251d352688e7ab9baf351e1c7da6", + "sha256:9c371dd326289d85906c27ec2bc1dcdedd9d0be12b543d16e37bad35754bde48", + "sha256:9c7cb25adba814d5f419733fe565f3289d6fa629ab9e0b78f6dff5fa94ab0456", + "sha256:a731552729ee8ae9c546fb1c651c97bf5f759018fdd40d0e9b4d129e1e3a44c8", + "sha256:aea4006b73b555fc5bdb650a8b92cf486d678afa168cf9b38402bb60bf0f9c18", + "sha256:b0e3f59d3c772f2c3baaef2db425e6fc4149d35a052d874bb95ccfca10a1b9f4", + "sha256:b15dc34273aefe522df25096d5d087abc626e388a28a28ac75a4404bb7668736", + "sha256:c000635fd78400a558bd7a3c2981bb2a430005ebaa909d31e6e300719739a949", + "sha256:c31f35a984caffb75f00a86852951a337540b44e4a22171354fb760cefa09346", + "sha256:c50a6379763c733562b1fee877372234d271e5c78cd13ade5f25978aa06744db", + "sha256:c94722bf403b8da744b7d0bb87e1f2529383003ceec92e754f768ef9323f69ad", + "sha256:dcbbc9cfa147d55a577d285fd479b43103188855074552708df7acc31a476dd9", + "sha256:fb9f5844db480e2ef9fce3a72e71122dd010ab7b2920f777966ba25f7eb63819" ], - "version": "==2021.8.28" + "version": "==2021.9.24" }, "requests": { "hashes": [ @@ -1183,7 +1183,7 @@ }, "spiffworkflow": { "git": 
"https://github.com/sartography/SpiffWorkflow", - "ref": "3330f2a3d098737ec79350a0b853aca7493e3957" + "ref": "d911632569d73ec5e039504005a740c3d4d50451" }, "sqlalchemy": { "hashes": [ @@ -1322,11 +1322,11 @@ }, "zipp": { "hashes": [ - "sha256:957cfda87797e389580cb8b9e3870841ca991e2125350677b2ca83a0e99390a3", - "sha256:f5812b1e007e48cff63449a5e9f4e7ebea716b4111f9c4f9a645f91d579bf0c4" + "sha256:1fc9641b26f3bd81069b7738b039f2819cab6e3fc3399a953e19d92cc81eff4d", + "sha256:8dc6c4d5a809d659067cc713f76bcf42fae8ae641db12fddfa93694a15abc96b" ], "markers": "python_version < '3.10'", - "version": "==3.5.0" + "version": "==3.5.1" } }, "develop": { diff --git a/crc/api.yml b/crc/api.yml index c589e868..72ce1a10 100755 --- a/crc/api.yml +++ b/crc/api.yml @@ -145,6 +145,7 @@ paths: description: The remote endpoint schema: type: string + example: https://testing.crconnect.uvadcos.io/api tags: - Workflow Sync API responses: diff --git a/crc/api/workflow_sync.py b/crc/api/workflow_sync.py index a3b35022..2aba52e3 100644 --- a/crc/api/workflow_sync.py +++ b/crc/api/workflow_sync.py @@ -159,6 +159,9 @@ def create_or_update_local_spec(remote,workflow_spec_id): session.add(local_category) local_spec.category = local_category + # Add the local spec to the database, then we can link the libraries. + session.add(local_spec) + # Set the libraries session.query(WorkflowLibraryModel).filter(WorkflowLibraryModel.workflow_spec_id == local_spec.id).delete() for library in specdict['libraries']: @@ -167,7 +170,7 @@ def create_or_update_local_spec(remote,workflow_spec_id): local_lib = WorkflowLibraryModel(workflow_spec_id=local_spec.id, library_spec_id=library['id']) session.add(local_lib) - session.add(local_spec) + session.commit() def update_or_create_current_file(remote,workflow_spec_id,updatefile): currentfile = file_get(workflow_spec_id, updatefile['filename']) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index c430879a..b964c519 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -27,8 +27,19 @@ class Task(object): # Field Types - FIELD_TYPE_FILE = "file" + FIELD_TYPE_STRING = "string" + FIELD_TYPE_LONG = "long" + FIELD_TYPE_BOOLEAN = "boolean" + FIELD_TYPE_DATE = "date" + FIELD_TYPE_ENUM = "enum" + FIELD_TYPE_TEXTAREA = "textarea" # textarea: Multiple lines of text FIELD_TYPE_AUTO_COMPLETE = "autocomplete" + FIELD_TYPE_FILE = "file" + FIELD_TYPE_FILES = "files" # files: Multiple files + FIELD_TYPE_TEL = "tel" # tel: Phone number + FIELD_TYPE_EMAIL = "email" # email: Email address + FIELD_TYPE_URL = "url" # url: Website address + FIELD_PROP_AUTO_COMPLETE_MAX = "autocomplete_num" # Not used directly, passed in from the front end. 
     # Required field
@@ -77,8 +88,6 @@ class Task(object):

     FIELD_PROP_HELP = "help"

-
-
     ##########################################################################
     def __init__(self, id, name, title, type, state, lane, form,
                  documentation, data,
@@ -103,6 +112,11 @@ class Task(object):
     def valid_property_names(cls):
         return [value for name, value in vars(cls).items() if name.startswith('FIELD_PROP')]

+    @classmethod
+    def valid_field_types(cls):
+        return [value for name, value in vars(cls).items() if name.startswith('FIELD_TYPE')]
+
+
 class OptionSchema(ma.Schema):
     class Meta:
         fields = ["id", "name", "data"]
diff --git a/crc/models/file.py b/crc/models/file.py
index da59cff7..a8e5b197 100644
--- a/crc/models/file.py
+++ b/crc/models/file.py
@@ -1,6 +1,9 @@
 import enum
 import urllib
+
+import connexion
 import flask
+from flask import url_for
 from marshmallow import INCLUDE, EXCLUDE, Schema
 from marshmallow.fields import Method
 from marshmallow_enum import EnumField
@@ -153,10 +156,14 @@ class FileSchema(Schema):

     def get_url(self, obj):
         token = 'not_available'
+        base_url = connexion.request.host_url
+        if obj.id is None:
+            return ""  # We can't return a url for a file that isn't stored yet.
+        file_url = url_for("/v1_0.crc_api_file_get_file_data_link", file_id=obj.id)
         if hasattr(flask.g, 'user'):
             token = flask.g.user.encode_auth_token()
-        return (app.config['APPLICATION_ROOT'] + 'file/' +
-                str(obj.id) + '/download?auth_token=' + urllib.parse.quote_plus(token))
+        url = base_url + file_url + '?auth_token=' + urllib.parse.quote_plus(token)
+        return url


 class LookupFileModel(db.Model):
diff --git a/crc/models/study.py b/crc/models/study.py
index cc999720..adb61680 100644
--- a/crc/models/study.py
+++ b/crc/models/study.py
@@ -162,13 +162,14 @@ class Category(object):
         self.name = model.name
         self.display_name = model.display_name
         self.display_order = model.display_order
+        self.admin = model.admin


 class CategorySchema(ma.Schema):
     workflows = fields.List(fields.Nested(WorkflowMetadataSchema), dump_only=True)

     class Meta:
         model = Category
-        additional = ["id", "name", "display_name", "display_order"]
+        additional = ["id", "name", "display_name", "display_order", "admin"]
         unknown = INCLUDE
diff --git a/crc/models/workflow.py b/crc/models/workflow.py
index 1c586f05..4c2611d9 100644
--- a/crc/models/workflow.py
+++ b/crc/models/workflow.py
@@ -16,7 +16,7 @@ class WorkflowSpecCategoryModel(db.Model):
     name = db.Column(db.String)
     display_name = db.Column(db.String)
     display_order = db.Column(db.Integer)
-
+    admin = db.Column(db.Boolean)

 class WorkflowSpecCategoryModelSchema(SQLAlchemyAutoSchema):
     class Meta:
diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py
index 825bd02b..732a9f5a 100644
--- a/crc/services/workflow_processor.py
+++ b/crc/services/workflow_processor.py
@@ -1,5 +1,6 @@
 import re

+from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine
 from SpiffWorkflow.serializer.exceptions import MissingSpecError
 from SpiffWorkflow.util.metrics import timeit, firsttime, sincetime
 from lxml import etree
@@ -8,7 +9,6 @@ from datetime import datetime
 from typing import List

 from SpiffWorkflow import Task as SpiffTask, WorkflowException, Task
-from SpiffWorkflow.bpmn.BpmnScriptEngine import BpmnScriptEngine
 from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
 from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer
 from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
@@ -30,9 +30,8 @@ from crc.services.file_service import FileService
 from crc import app
 from crc.services.user_service import UserService
-from difflib import SequenceMatcher


-class CustomBpmnScriptEngine(BpmnScriptEngine):
+class CustomBpmnScriptEngine(PythonScriptEngine):
     """This is a custom script processor that can be easily injected into Spiff Workflow.
     It will execute python code read in from the bpmn.  It will also make any scripts in the
     scripts directory available for execution. """
@@ -79,7 +78,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
                 augmentMethods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
             else:
                 augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
-            exp, valid = self.validateExpression(expression)
+            exp, valid = self.validate_expression(expression)
             return self._eval(exp, external_methods=augmentMethods, **task.data)

         except Exception as e:
diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py
index 47ce69bf..f9dc0c10 100755
--- a/crc/services/workflow_service.py
+++ b/crc/services/workflow_service.py
@@ -132,6 +132,13 @@ class WorkflowService(object):
         spec, only completing the required fields, rather than everything.
         """
+        # Get workflow state dictionary, make sure workflow is not disabled.
+        if validate_study_id is not None:
+            study_model = session.query(StudyModel).filter(StudyModel.id == validate_study_id).first()
+            spec_model = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id == spec_id).first()
+            status = StudyService._get_study_status(study_model)
+            if status[spec_model.name]['status'] == 'disabled':
+                raise ApiError(code='disabled_workflow', message=f"This workflow is disabled. {status[spec_model.name]['message']}")
         workflow_model = WorkflowService.make_test_workflow(spec_id, validate_study_id)
         try:
             processor = WorkflowProcessor(workflow_model, validate_only=True)
@@ -203,6 +210,7 @@ class WorkflowService(object):
                                           task_name=task.get_name())
                 # Assure field has valid properties
                 WorkflowService.check_field_properties(field, task)
+                WorkflowService.check_field_type(field, task)

                 # Process the label of the field if it is dynamic.
                 if field.has_property(Task.FIELD_PROP_LABEL_EXPRESSION):
@@ -294,6 +302,14 @@ class WorkflowService(object):
                                          f'The field {field.id} contains an unsupported '
                                          f'property: {name}', task=task)

+    @staticmethod
+    def check_field_type(field, task):
+        """Assures that the field type is valid."""
+        valid_types = Task.valid_field_types()
+        if field.type not in valid_types:
+            raise ApiError.from_task("invalid_field_type",
+                                     f'The field {field.id} has an unknown field type '
+                                     f'{field.type}, valid types include {valid_types}', task=task)

     @staticmethod
     def post_process_form(task):
@@ -434,10 +450,15 @@ class WorkflowService(object):
             if len(field.options) > 0:
                 random_choice = random.choice(field.options)
                 if isinstance(random_choice, dict):
-                    return {'value': random_choice['id'], 'label': random_choice['name'], 'data': random_choice['data']}
+                    random_value = {'value': random_choice['id'], 'label': random_choice['name'], 'data': random_choice['data']}
                 else:
                     # fixme: why it is sometimes an EnumFormFieldOption, and other times not?
-                    return {'value': random_choice.id, 'label': random_choice.name}
+                    random_value = {'value': random_choice.id, 'label': random_choice.name}
+                if field.has_property(Task.FIELD_PROP_ENUM_TYPE) and field.get_property(Task.FIELD_PROP_ENUM_TYPE) == 'checkbox':
+                    return [random_value]
+                else:
+                    return random_value
+
             else:
                 raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
                                                          " with no options" % field.id, task)
@@ -446,19 +467,25 @@ class WorkflowService(object):
             # from the lookup model
             lookup_model = LookupService.get_lookup_model(task, field)
             if field.has_property(Task.FIELD_PROP_LDAP_LOOKUP):  # All ldap records get the same person.
-                return WorkflowService._random_ldap_record()
+                random_value = WorkflowService._random_ldap_record()
             elif lookup_model:
                 data = db.session.query(LookupDataModel).filter(
                     LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
                 options = [{"value": d.value, "label": d.label, "data": d.data} for d in data]
                 if len(options) > 0:
-                    return random.choice(options)
+                    random_value = random.choice(options)
                 else:
                     raise ApiError.from_task("invalid enum", "You specified an enumeration field (%s),"
                                                              " with no options" % field.id, task)
             else:
                 raise ApiError.from_task("unknown_lookup_option", "The settings for this auto complete field "
                                                                   "are incorrect: %s " % field.id, task)
+            if field.has_property(Task.FIELD_PROP_ENUM_TYPE) and field.get_property(Task.FIELD_PROP_ENUM_TYPE) == 'checkbox':
+                return [random_value]
+            else:
+                return random_value
+
+
         elif field.type == "long":
             return random.randint(1, 1000)
         elif field.type == 'boolean':
diff --git a/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn b/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
index 7df1cd1e..ef435a9d 100644
--- a/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
+++ b/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
@@ -1,9 +1,6 @@
[The DMN XML in this hunk was stripped during extraction and is not recoverable; only the -/+ line markers survived.]
diff --git a/example_data.py b/example_data.py
index 1fdfc965..2b5dc14a 100644
--- a/example_data.py
+++ b/example_data.py
@@ -249,7 +249,8 @@ class ExampleDataLoader:
                 id=0,
                 name='test_category',
                 display_name='Test Category',
-                display_order=0
+                display_order=0,
+                admin=False
             )
             db.session.add(category)
             db.session.commit()
@@ -394,4 +395,4 @@ for k in investigators.keys():
     del investigator

 cnt_subs = len(subs.keys())
-del investigators
\ No newline at end of file
+del investigators
diff --git a/migrations/versions/5c63a89ee7b7_.py b/migrations/versions/5c63a89ee7b7_.py
new file mode 100644
index 00000000..e16278aa
--- /dev/null
+++ b/migrations/versions/5c63a89ee7b7_.py
@@ -0,0 +1,43 @@
+"""empty message
+
+Revision ID: 5c63a89ee7b7
+Revises: 9afbd55082a0
+Create Date: 2021-09-29 10:24:20.413807
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '5c63a89ee7b7'
+down_revision = '9afbd55082a0'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_foreign_key(None, 'user', 'ldap_model', ['uid'], ['uid'])
+    op.drop_column('user', 'affiliation')
+    op.drop_column('user', 'email_address')
+    op.drop_column('user', 'eppn')
+    op.drop_column('user', 'title')
+    op.drop_column('user', 'first_name')
+    op.drop_column('user', 'last_name')
+    op.drop_column('user', 'display_name')
+    op.add_column('workflow_spec_category', sa.Column('admin', sa.Boolean(), nullable=True))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('workflow_spec_category', 'admin')
+    op.add_column('user', sa.Column('display_name', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('user', sa.Column('last_name', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('user', sa.Column('first_name', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('user', sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('user', sa.Column('eppn', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('user', sa.Column('email_address', sa.VARCHAR(), autoincrement=False, nullable=True))
+    op.add_column('user', sa.Column('affiliation', sa.VARCHAR(), autoincrement=False, nullable=True))
+    # ### end Alembic commands ###
diff --git a/tests/data/enum_checkbox/enum_checkbox.bpmn b/tests/data/enum_checkbox/enum_checkbox.bpmn
new file mode 100644
index 00000000..f74586fd
--- /dev/null
+++ b/tests/data/enum_checkbox/enum_checkbox.bpmn
@@ -0,0 +1,67 @@
[The BPMN XML markup was stripped during extraction and is not recoverable. Surviving fragments: sequence flows Flow_1ui50vr, Flow_07pr9lr, Flow_13oillk, and a task documentation template: "# Enum data {% for i in range(some_field | length) %} {{ some_field[i] }} {% endfor %}".]
diff --git a/tests/data/invalid_custom_field/invalid_custom_field.bpmn b/tests/data/invalid_custom_field/invalid_custom_field.bpmn
new file mode 100644
index 00000000..6c13a74b
--- /dev/null
+++ b/tests/data/invalid_custom_field/invalid_custom_field.bpmn
@@ -0,0 +1,43 @@
[The BPMN XML markup was stripped during extraction and is not recoverable. Surviving fragments: sequence flows SequenceFlow_12ulmn8 and SequenceFlow_06786ls.]
diff --git a/tests/data/pb_responses/_get_study_status.json b/tests/data/pb_responses/_get_study_status.json
new file mode 100644
index 00000000..ec5d5033
--- /dev/null
+++ b/tests/data/pb_responses/_get_study_status.json
@@ -0,0 +1,5 @@
+[{
+    "core_info": {"status": "required", "message": "This workflow is always required and recommended that it is completed after your Protocol Builder entries are done and the Personnel workflow completed"},
+    "protocol": {"status": "required", "message": "required"},
+    "data_security_plan": {"status": "disabled", "message": "This is my mocked disable message."}
+}]
diff --git a/tests/files/test_files_api.py b/tests/files/test_files_api.py
index 17f49e9e..6d4e8deb 100644
--- a/tests/files/test_files_api.py
+++ b/tests/files/test_files_api.py
@@ -78,7 +78,8 @@ class TestFilesApi(BaseTest):

         data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
         rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
-                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data, follow_redirects=True,
+                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
+                           follow_redirects=True,
                            content_type='multipart/form-data', headers=self.logged_in_headers())
         self.assert_success(rv)
@@ -94,7 +95,8 @@ class TestFilesApi(BaseTest):

         data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
         rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
-                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data, follow_redirects=True,
+                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
+                           follow_redirects=True,
                            content_type='multipart/form-data', headers=self.logged_in_headers())
         self.assert_success(rv)
@@ -176,13 +178,25 @@ class TestFilesApi(BaseTest):
         file.name = "silly_new_name.bpmn"

         rv = self.app.put('/v1.0/file/%i' % file.id,
-                           content_type="application/json",
-                           data=json.dumps(FileModelSchema().dump(file)), headers=self.logged_in_headers())
+                          content_type="application/json",
+                          data=json.dumps(FileModelSchema().dump(file)), headers=self.logged_in_headers())
         self.assert_success(rv)
         db_file = session.query(FileModel).filter_by(id=file.id).first()
         self.assertIsNotNone(db_file)
         self.assertEqual(file.name, db_file.name)

+    def test_load_valid_url_for_files(self):
+        self.load_example_data()
+        self.create_reference_document()
+        file: FileModel = session.query(FileModel).filter(FileModel.is_reference == False).first()
+        rv = self.app.get('/v1.0/file/%i' % file.id, content_type="application/json", headers=self.logged_in_headers())
+        self.assert_success(rv)
+        file_json = json.loads(rv.get_data(as_text=True))
+        print(file_json)
+        self.assertIsNotNone(file_json['url'])
+        file_data_rv = self.app.get(file_json['url'])
+        self.assert_success(file_data_rv)
+
     def test_update_file_data(self):
         self.load_example_data()
         spec = session.query(WorkflowSpecModel).first()
@@ -209,7 +223,7 @@ class TestFilesApi(BaseTest):
         file_data = FileService.get_file_data(file_model.id)
         self.assertEqual(2, file_data.version)

-        rv = self.app.get('/v1.0/file/%i/data' % file_json['id'],  headers=self.logged_in_headers())
+        rv = self.app.get('/v1.0/file/%i/data' % file_json['id'], headers=self.logged_in_headers())
         self.assert_success(rv)
         data = rv.get_data()
         self.assertIsNotNone(data)
@@ -262,7 +276,8 @@ class TestFilesApi(BaseTest):

         data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
         rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
-                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data, follow_redirects=True,
+                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
+                           follow_redirects=True,
                            content_type='multipart/form-data', headers=self.logged_in_headers())
         self.assert_success(rv)
@@ -296,7 +311,8 @@ class TestFilesApi(BaseTest):

         data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
         rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
-                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data, follow_redirects=True,
+                           (workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
+                           follow_redirects=True,
                            content_type='multipart/form-data', headers=self.logged_in_headers())
         self.assert_success(rv)
         json_data = json.loads(rv.get_data(as_text=True))
@@ -315,8 +331,6 @@ class TestFilesApi(BaseTest):
         rv = self.app.get('/v1.0/file/%i' % file_id, headers=self.logged_in_headers())
         self.assertEqual(404, rv.status_code)

-
-
     def test_change_primary_bpmn(self):
         self.load_example_data()
         spec = session.query(WorkflowSpecModel).first()
@@ -332,19 +346,17 @@ class TestFilesApi(BaseTest):
         file = FileModelSchema().load(json_data, session=session)

         # Delete the primary BPMN file for the workflow.
-        orig_model = session.query(FileModel).\
-            filter(FileModel.primary == True).\
+        orig_model = session.query(FileModel). \
+            filter(FileModel.primary == True). \
             filter(FileModel.workflow_spec_id == spec.id).first()
         rv = self.app.delete('/v1.0/file?file_id=%s' % orig_model.id, headers=self.logged_in_headers())

-
         # Set that new file to be the primary BPMN, assure it has a primary_process_id
         file.primary = True

         rv = self.app.put('/v1.0/file/%i' % file.id,
-                           content_type="application/json",
-                           data=json.dumps(FileModelSchema().dump(file)), headers=self.logged_in_headers())
+                          content_type="application/json",
+                          data=json.dumps(FileModelSchema().dump(file)), headers=self.logged_in_headers())
         self.assert_success(rv)
         json_data = json.loads(rv.get_data(as_text=True))
         self.assertTrue(json_data['primary'])
         self.assertIsNotNone(json_data['primary_process_id'])
-
diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py
index fc560761..3ac19731 100644
--- a/tests/study/test_study_api.py
+++ b/tests/study/test_study_api.py
@@ -69,6 +69,7 @@ class TestStudyApi(BaseTest):
         category = study.categories[0]
         self.assertEqual("test_category", category['name'])
         self.assertEqual("Test Category", category['display_name'])
+        self.assertEqual(False, category['admin'])
         self.assertEqual(1, len(category["workflows"]))
         workflow = category["workflows"][0]
         self.assertEqual("random_fact", workflow["name"])
diff --git a/tests/workflow/test_workflow_enum_checkbox.py b/tests/workflow/test_workflow_enum_checkbox.py
new file mode 100644
index 00000000..55f94300
--- /dev/null
+++ b/tests/workflow/test_workflow_enum_checkbox.py
@@ -0,0 +1,22 @@
+from tests.base_test import BaseTest
+
+
+class TestEnumCheckbox(BaseTest):
+
+    def test_enum_checkbox_validation(self):
+        spec_model = self.load_test_spec('enum_checkbox')
+        rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
+        self.assertEqual([], rv.json)
+
+    def test_enum_checkbox(self):
+        workflow = self.create_workflow('enum_checkbox')
+        workflow_api = self.get_workflow_api(workflow)
+        task = workflow_api.next_task
+        data_values = [{'value': 'value_1', 'label': 'value_1'}, {'value': 'value_3', 'label': 'value_3'}]
+
+        self.complete_form(workflow, task, {'some_field': data_values})
+        workflow_api = self.get_workflow_api(workflow)
+        task = workflow_api.next_task
+
+        self.assertIn("{'value': 'value_1', 'label': 'value_1'}", task.documentation)
+        self.assertIn("{'value': 'value_3', 'label': 'value_3'}", task.documentation)
diff --git a/tests/workflow/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py
index 3c144d39..c0e8937f 100644
--- a/tests/workflow/test_workflow_spec_validation_api.py
+++ b/tests/workflow/test_workflow_spec_validation_api.py
@@ -9,6 +9,7 @@ from tests.base_test import BaseTest
 from crc import session, app
 from crc.api.common import ApiErrorSchema
 from crc.models.protocol_builder import ProtocolBuilderStudySchema
+from crc.models.study import StudyModel
 from crc.models.workflow import WorkflowSpecModel, WorkflowModel
 from crc.services.workflow_service import WorkflowService

@@ -147,3 +148,41 @@ class TestWorkflowSpecValidation(BaseTest):
         self.assertIn('enum_with_default', final_data)
         self.assertEqual('maybe', final_data['enum_with_default']['value'])

+    def test_invalid_custom_field(self):
+        self.load_example_data()
self.validate_workflow("invalid_custom_field") + self.assertEqual(1, len(errors)) + self.assertEqual("invalid_field_type", errors[0]['code']) + + @patch('crc.services.study_service.StudyService._get_study_status') + def test_disabled_spec_validation(self, mock_status): + """A disabled workflow spec should fail validation""" + app.config['PB_ENABLED'] = True + self.load_example_data() + study_model = session.query(StudyModel).first() + + # workflow spec to validate + spec_model = WorkflowSpecModel(id='data_security_plan', + name='data_security_plan', + display_name='Data Security Plan', + description='Data Security Plan', + is_master_spec=False, + category_id=0, + display_order=0, + standalone=False, + library=False) + session.add(spec_model) + session.commit() + + # This response sets the status for data_security_plan to disabled + status_response = self.protocol_builder_response('_get_study_status.json') + mock_status.return_value = json.loads(status_response)[0] + + # This should raise an ApiError which we can see in the json data + rv = self.app.get('/v1.0/workflow-specification/%s/validate?study_id=%s' % (spec_model.id, study_model.id), headers=self.logged_in_headers()) + self.assert_success(rv) + json_data = json.loads(rv.get_data()) + self.assertEqual(1, len(json_data)) + api_error = json_data[0] + self.assertEqual('disabled_workflow', api_error['code']) + self.assertEqual('This workflow is disabled. This is my mocked disable message.', api_error['message'])