commit 1b77681af9

Dockerfile (43 lines changed)

@@ -1,27 +1,22 @@
-FROM python:3.7
+FROM python:3.7-slim
+
+ENV PATH=/root/.local/bin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
+WORKDIR /app
+COPY Pipfile Pipfile.lock /app/
 
-# install node and yarn
-RUN apt-get update
-RUN apt-get -y install postgresql-client
+RUN set -xe \
+    && pip install pipenv \
+    && apt-get update -q \
+    && apt-get install -y -q \
+        gcc python3-dev libssl-dev \
+        curl postgresql-client git-core \
+        gunicorn3 postgresql-client \
+    && pipenv install --dev \
+    && apt-get remove -y gcc python3-dev libssl-dev \
+    && apt-get autoremove -y \
+    && apt-get clean -y \
+    && rm -rf /var/lib/apt/lists/* \
+    && mkdir -p /app \
+    && useradd _gunicorn --no-create-home --user-group
 
-# config project dir
-RUN mkdir /crc-workflow
-WORKDIR /crc-workflow
-
-# install python requirements
-RUN pip install pipenv
-ADD Pipfile /crc-workflow/
-ADD Pipfile.lock /crc-workflow/
-RUN pipenv install --dev
-
-# include rejoiner code (gets overriden by local changes)
-COPY . /crc-workflow/
-
-# run webserver by default
-ENV FLASK_APP=./crc/__init__.py
-CMD ["pipenv", "run", "python", "./run.py"]
-
-# expose ports
-EXPOSE 5000
+COPY . /app/
+WORKDIR /app
Pipfile (6 lines changed)

@@ -5,6 +5,7 @@ verify_ssl = true
 
 [dev-packages]
 pytest = "*"
+pbr = "*"
 
 [packages]
 connexion = {extras = ["swagger-ui"],version = "*"}
@@ -24,18 +25,19 @@ pyjwt = "*"
 requests = "*"
 xlsxwriter = "*"
 webtest = "*"
-spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "bug/the_horror"}
+spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "deploy"}
 alembic = "*"
 coverage = "*"
 sphinx = "*"
 recommonmark = "*"
 psycopg2-binary = "*"
 docxtpl = "*"
-flask-sso = "*"
 python-dateutil = "*"
 pandas = "*"
 xlrd = "*"
 ldap3 = "*"
+gunicorn = "*"
+werkzeug = "*"
 
 [requires]
 python_version = "3.7"
Pipfile.lock

@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "bd289126c41b0f5f2761f0415d85e1110a584256460374a9ce4cda07c0033ddd"
+            "sha256": "979f996148ee181e3e0af2a3777aa1d00d0fd5d943d49df65963e694b8a88871"
        },
        "pipfile-spec": 6,
        "requires": {
@@ -96,12 +96,6 @@
            ],
            "version": "==3.6.3.0"
        },
-        "blinker": {
-            "hashes": [
-                "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
-            ],
-            "version": "==1.4"
-        },
        "celery": {
            "hashes": [
                "sha256:108a0bf9018a871620936c33a3ee9f6336a89f8ef0a0f567a9001f4aa361415f",
@@ -241,11 +235,11 @@
        },
        "docxtpl": {
            "hashes": [
-                "sha256:16a76d360c12f7da3a28821fc740b9a84b891895233493ff0b002ffaa6026905",
-                "sha256:f19adf2a713a753c1e056ef0ce395bc8da62d495b091ebf9fe67dfc6d1115f9f"
+                "sha256:0e031ea5da63339f2bac0fd7eb7b3b137303571a9a92c950501148240ea22047",
+                "sha256:45f04661b9ab1fd66b975a0a547b30c8811f457bef2f85249c2f3c5784a00052"
            ],
            "index": "pypi",
-            "version": "==0.9.2"
+            "version": "==0.10.0"
        },
        "et-xmlfile": {
            "hashes": [
@@ -302,17 +296,10 @@
        },
        "flask-sqlalchemy": {
            "hashes": [
-                "sha256:0078d8663330dc05a74bc72b3b6ddc441b9a744e2f56fe60af1a5bfc81334327",
-                "sha256:6974785d913666587949f7c2946f7001e4fa2cb2d19f4e69ead02e4b8f50b33d"
+                "sha256:0b656fbf87c5f24109d859bafa791d29751fabbda2302b606881ae5485b557a5",
+                "sha256:fcfe6df52cd2ed8a63008ca36b86a51fa7a4b70cef1c39e5625f722fca32308e"
            ],
-            "version": "==2.4.1"
-        },
-        "flask-sso": {
-            "hashes": [
-                "sha256:541a8a2387c6eac4325c53f8f7f863a03173b37aa558a37a430010d7fc1a3633"
-            ],
-            "index": "pypi",
-            "version": "==0.4.0"
+            "version": "==2.4.3"
        },
        "future": {
            "hashes": [
@@ -320,6 +307,14 @@
            ],
            "version": "==0.18.2"
        },
+        "gunicorn": {
+            "hashes": [
+                "sha256:1904bb2b8a43658807108d59c3f3d56c2b6121a701161de0ddf9ad140073c626",
+                "sha256:cd4a810dd51bf497552cf3f863b575dabd73d6ad6a91075b65936b151cbf4f9c"
+            ],
+            "index": "pypi",
+            "version": "==20.0.4"
+        },
        "httpretty": {
            "hashes": [
                "sha256:24a6fd2fe1c76e94801b74db8f52c0fb42718dc4a199a861b305b1a492b9d868"
@@ -550,25 +545,25 @@
        },
        "pandas": {
            "hashes": [
-                "sha256:07c1b58936b80eafdfe694ce964ac21567b80a48d972879a359b3ebb2ea76835",
-                "sha256:0ebe327fb088df4d06145227a4aa0998e4f80a9e6aed4b61c1f303bdfdf7c722",
-                "sha256:11c7cb654cd3a0e9c54d81761b5920cdc86b373510d829461d8f2ed6d5905266",
-                "sha256:12f492dd840e9db1688126216706aa2d1fcd3f4df68a195f9479272d50054645",
-                "sha256:167a1315367cea6ec6a5e11e791d9604f8e03f95b57ad227409de35cf850c9c5",
-                "sha256:1a7c56f1df8d5ad8571fa251b864231f26b47b59cbe41aa5c0983d17dbb7a8e4",
-                "sha256:1fa4bae1a6784aa550a1c9e168422798104a85bf9c77a1063ea77ee6f8452e3a",
-                "sha256:32f42e322fb903d0e189a4c10b75ba70d90958cc4f66a1781ed027f1a1d14586",
-                "sha256:387dc7b3c0424327fe3218f81e05fc27832772a5dffbed385013161be58df90b",
-                "sha256:6597df07ea361231e60c00692d8a8099b519ed741c04e65821e632bc9ccb924c",
-                "sha256:743bba36e99d4440403beb45a6f4f3a667c090c00394c176092b0b910666189b",
-                "sha256:858a0d890d957ae62338624e4aeaf1de436dba2c2c0772570a686eaca8b4fc85",
-                "sha256:863c3e4b7ae550749a0bb77fa22e601a36df9d2905afef34a6965bed092ba9e5",
-                "sha256:a210c91a02ec5ff05617a298ad6f137b9f6f5771bf31f2d6b6367d7f71486639",
-                "sha256:ca84a44cf727f211752e91eab2d1c6c1ab0f0540d5636a8382a3af428542826e",
-                "sha256:d234bcf669e8b4d6cbcd99e3ce7a8918414520aeb113e2a81aeb02d0a533d7f7"
+                "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46",
+                "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5",
+                "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa",
+                "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc",
+                "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678",
+                "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc",
+                "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31",
+                "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8",
+                "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6",
+                "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa",
+                "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4",
+                "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874",
+                "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd",
+                "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4",
+                "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126",
+                "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648"
            ],
            "index": "pypi",
-            "version": "==1.0.3"
+            "version": "==1.0.4"
        },
        "psycopg2-binary": {
            "hashes": [
@@ -711,10 +706,10 @@
        },
        "six": {
            "hashes": [
-                "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a",
-                "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"
+                "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
+                "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
            ],
-            "version": "==1.14.0"
+            "version": "==1.15.0"
        },
        "snowballstemmer": {
            "hashes": [
@@ -732,11 +727,11 @@
        },
        "sphinx": {
            "hashes": [
-                "sha256:62edfd92d955b868d6c124c0942eba966d54b5f3dcb4ded39e65f74abac3f572",
-                "sha256:f5505d74cf9592f3b997380f9bdb2d2d0320ed74dd69691e3ee0644b956b8d83"
+                "sha256:779a519adbd3a70fc7c468af08c5e74829868b0a5b34587b33340e010291856c",
+                "sha256:ea64df287958ee5aac46be7ac2b7277305b0381d213728c3a49d8bb9b8415807"
            ],
            "index": "pypi",
-            "version": "==3.0.3"
+            "version": "==3.0.4"
        },
        "sphinxcontrib-applehelp": {
            "hashes": [
@@ -783,7 +778,7 @@
        "spiffworkflow": {
            "editable": true,
            "git": "https://github.com/sartography/SpiffWorkflow.git",
-            "ref": "7dc54f1205de7006bdda6d966dc957e558f3c7f3"
+            "ref": "c8d87826d496af825a184bdc3f0a751e603cfe44"
        },
        "sqlalchemy": {
            "hashes": [
@@ -868,6 +863,7 @@
                "sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43",
                "sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c"
            ],
+            "index": "pypi",
            "version": "==1.0.1"
        },
        "xlrd": {
@@ -924,6 +920,14 @@
            ],
            "version": "==20.4"
        },
+        "pbr": {
+            "hashes": [
+                "sha256:07f558fece33b05caf857474a366dfcc00562bca13dd8b47b2b3e22d9f9bf55c",
+                "sha256:579170e23f8e0c2f24b0de612f71f648eccb79fb1322c814ae6b3c07b5ba23e8"
+            ],
+            "index": "pypi",
+            "version": "==5.4.5"
+        },
        "pluggy": {
            "hashes": [
                "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
@@ -955,10 +959,10 @@
        },
        "six": {
            "hashes": [
-                "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a",
-                "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"
+                "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
+                "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
            ],
-            "version": "==1.14.0"
+            "version": "==1.15.0"
        },
        "wcwidth": {
            "hashes": [
config/default.py

@@ -13,6 +13,9 @@ DEVELOPMENT = environ.get('DEVELOPMENT', default="true") == "true"
 TESTING = environ.get('TESTING', default="false") == "true"
 PRODUCTION = (environ.get('PRODUCTION', default="false") == "true") or (not DEVELOPMENT and not TESTING)
 
+# Add trailing slash to base path
+APPLICATION_ROOT = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/'))
+
 DB_HOST = environ.get('DB_HOST', default="localhost")
 DB_PORT = environ.get('DB_PORT', default="5432")
 DB_NAME = environ.get('DB_NAME', default="crc_dev")
@@ -22,39 +25,20 @@ SQLALCHEMY_DATABASE_URI = environ.get(
     'SQLALCHEMY_DATABASE_URI',
     default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
 )
-TOKEN_AUTH_TTL_HOURS = environ.get('TOKEN_AUTH_TTL_HOURS', default=4)
+TOKEN_AUTH_TTL_HOURS = int(environ.get('TOKEN_AUTH_TTL_HOURS', default=4))
 TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
 FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
 SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
 
-#: Default attribute map for single signon.
-SSO_LOGIN_URL = '/login'
-SSO_ATTRIBUTE_MAP = {
-    'eppn': (False, 'eppn'),  # dhf8r@virginia.edu
-    'uid': (True, 'uid'),  # dhf8r
-    'givenName': (False, 'first_name'),  # Daniel
-    'mail': (False, 'email_address'),  # dhf8r@Virginia.EDU
-    'sn': (False, 'last_name'),  # Funk
-    'affiliation': (False, 'affiliation'),  # 'staff@virginia.edu;member@virginia.edu'
-    'displayName': (False, 'display_name'),  # Daniel Harold Funk
-    'title': (False, 'title')  # SOFTWARE ENGINEER V
-}
-
 # %s/%i placeholders expected for uva_id and study_id in various calls.
-PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/pb/")
+PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
+PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/pb/").strip('/') + '/'  # Trailing slash required
 PB_USER_STUDIES_URL = environ.get('PB_USER_STUDIES_URL', default=PB_BASE_URL + "user_studies?uva_id=%s")
 PB_INVESTIGATORS_URL = environ.get('PB_INVESTIGATORS_URL', default=PB_BASE_URL + "investigators?studyid=%i")
 PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL + "required_docs?studyid=%i")
 PB_STUDY_DETAILS_URL = environ.get('PB_STUDY_DETAILS_URL', default=PB_BASE_URL + "study?studyid=%i")
 
-LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu")
-LDAP_TIMEOUT_SEC = environ.get('LDAP_TIMEOUT_SEC', default=3)
-print('=== USING DEFAULT CONFIG: ===')
-print('DB_HOST = ', DB_HOST)
-print('CORS_ALLOW_ORIGINS = ', CORS_ALLOW_ORIGINS)
-print('DEVELOPMENT = ', DEVELOPMENT)
-print('TESTING = ', TESTING)
-print('PRODUCTION = ', PRODUCTION)
-print('PB_BASE_URL = ', PB_BASE_URL)
-print('LDAP_URL = ', LDAP_URL)
+LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/')  # No trailing slash or http://
+LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=3))
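A minimal sketch of what the two coercions above buy (illustrative values, not part of the commit): environment variables always arrive as strings, so the int() wrappers make TTL/timeout arithmetic safe, and the strip('/') handling normalizes slashes on the base paths:

    import re
    from os import environ

    # Illustrative env values; the expressions mirror config/default.py.
    environ['TOKEN_AUTH_TTL_HOURS'] = '8'
    environ['APPLICATION_ROOT'] = 'api/'
    environ['PB_BASE_URL'] = 'http://localhost:5001/pb'   # trailing slash missing

    ttl = int(environ.get('TOKEN_AUTH_TTL_HOURS', default=4))
    root = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/'))
    pb_base = environ.get('PB_BASE_URL', default="http://localhost:5001/pb/").strip('/') + '/'

    print(ttl)      # 8 -> an int, so token-expiry math works
    print(root)     # /api/
    print(pb_base)  # http://localhost:5001/pb/  (trailing slash restored)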
config/testing.py

@@ -1,11 +1,28 @@
 import os
+from os import environ
 
 basedir = os.path.abspath(os.path.dirname(__file__))
 
 NAME = "CR Connect Workflow"
 DEVELOPMENT = True
 TESTING = True
-SQLALCHEMY_DATABASE_URI = "postgresql://crc_user:crc_pass@localhost:5432/crc_test"
 TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
 PB_ENABLED = False
+
+# This is here for when we are running the E2E tests in the frontend code bases,
+# which will set the TESTING environment to true, causing this to execute, but we need
+# to respect the environment variables in that case.
+# When running locally the defaults apply, meaning we use crc_test for doing the tests
+# locally, and we don't overwrite the database. Did you read this far? Have a cookie!
+DB_HOST = environ.get('DB_HOST', default="localhost")
+DB_PORT = environ.get('DB_PORT', default="5432")
+DB_NAME = environ.get('DB_NAME', default="crc_test")
+DB_USER = environ.get('DB_USER', default="crc_user")
+DB_PASSWORD = environ.get('DB_PASSWORD', default="crc_pass")
+SQLALCHEMY_DATABASE_URI = environ.get(
+    'SQLALCHEMY_DATABASE_URI',
+    default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
+)
+
+print('### USING TESTING CONFIG: ###')
+print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
config/travis-testing.py

@@ -8,18 +8,7 @@ SQLALCHEMY_DATABASE_URI = "postgresql://postgres:@localhost:5432/crc_test"
 TOKEN_AUTH_TTL_HOURS = 2
 TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
 FRONTEND_AUTH_CALLBACK = "http://localhost:4200/session"  # Not Required
 
-#: Default attribute map for single signon.
-SSO_ATTRIBUTE_MAP = {
-    'eppn': (False, 'eppn'),  # dhf8r@virginia.edu
-    'uid': (True, 'uid'),  # dhf8r
-    'givenName': (False, 'first_name'),  # Daniel
-    'mail': (False, 'email_address'),  # dhf8r@Virginia.EDU
-    'sn': (False, 'last_name'),  # Funk
-    'affiliation': (False, 'affiliation'),  # 'staff@virginia.edu;member@virginia.edu'
-    'displayName': (False, 'display_name'),  # Daniel Harold Funk
-    'title': (False, 'title')  # SOFTWARE ENGINEER V
-}
 PB_ENABLED = False
 
 print('+++ USING TRAVIS TESTING CONFIG: +++')
 print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
crc/__init__.py

@@ -6,7 +6,6 @@ from flask_cors import CORS
 from flask_marshmallow import Marshmallow
 from flask_migrate import Migrate
 from flask_sqlalchemy import SQLAlchemy
-from flask_sso import SSO
 
 logging.basicConfig(level=logging.INFO)
 
@@ -31,17 +30,26 @@ session = db.session
 migrate = Migrate(app, db)
 ma = Marshmallow(app)
-sso = SSO(app=app)
 
 from crc import models
 from crc import api
 
-connexion_app.add_api('api.yml')
+connexion_app.add_api('api.yml', base_path='/v1.0')
 
 # Convert list of allowed origins to list of regexes
 origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']]
 cors = CORS(connexion_app.app, origins=origins_re)
 
+print('=== USING THESE CONFIG SETTINGS: ===')
+print('DB_HOST = ', app.config['DB_HOST'])
+print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
+print('DEVELOPMENT = ', app.config['DEVELOPMENT'])
+print('TESTING = ', app.config['TESTING'])
+print('PRODUCTION = ', app.config['PRODUCTION'])
+print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
+print('LDAP_URL = ', app.config['LDAP_URL'])
+print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
+print('PB_ENABLED = ', app.config['PB_ENABLED'])
+
 @app.cli.command()
 def load_example_data():
@@ -49,3 +57,11 @@ def load_example_data():
     from example_data import ExampleDataLoader
     ExampleDataLoader.clean_db()
     ExampleDataLoader().load_all()
+
+
+@app.cli.command()
+def load_example_rrt_data():
+    """Load example data into the database."""
+    from example_data import ExampleDataLoader
+    ExampleDataLoader.clean_db()
+    ExampleDataLoader().load_rrt()
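The base_path='/v1.0' argument mounts every operation from api.yml under a /v1.0 prefix at the application level. A hedged, self-contained sketch of the same pattern (spec file name and port are illustrative):

    import connexion

    connexion_app = connexion.FlaskApp(__name__, specification_dir='.')
    # Routes defined in api.yml are now served under /v1.0,
    # e.g. GET /v1.0/study instead of GET /study.
    connexion_app.add_api('api.yml', base_path='/v1.0')

    if __name__ == '__main__':
        connexion_app.run(port=5000)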
crc/api.yml (137 lines changed)

@@ -56,7 +56,7 @@ paths:
       required: false
       schema:
         type: string
-    - name: redirect_url
+    - name: redirect
       in: query
       required: false
       schema:
@@ -82,7 +82,7 @@ paths:
   # /v1.0/study
   /study:
     get:
-      operationId: crc.api.study.all_studies
+      operationId: crc.api.study.user_studies
       summary: Provides a list of studies related to the current user.
       tags:
         - Studies
@@ -109,7 +109,24 @@ paths:
           content:
             application/json:
               schema:
-                $ref: "#/components/schemas/Study"
+                type: array
+                items:
+                  $ref: "#/components/schemas/Study"
+  /study/all:
+    get:
+      operationId: crc.api.study.all_studies
+      summary: Provides a list of studies
+      tags:
+        - Studies
+      responses:
+        '200':
+          description: An array of studies, with submitted files, ordered by the last modified date.
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "#/components/schemas/Study"
   /study/{study_id}:
     parameters:
       - name: study_id
@@ -156,26 +173,6 @@ paths:
           application/json:
             schema:
               $ref: "#/components/schemas/Study"
-  /study-update/{study_id}:
-    post:
-      operationId: crc.api.study.post_update_study_from_protocol_builder
-      summary: If the study is up-to-date with Protocol Builder, returns a 304 Not Modified. If out of date, returns a 202 Accepted and study state changes to updating.
-      tags:
-        - Study Status
-      parameters:
-        - name: study_id
-          in: path
-          required: true
-          description: The id of the study that should be checked for updates.
-          schema:
-            type: integer
-            format: int32
-      responses:
-        '304':
-          description: Study is currently up to date and does not need to be reloaded from Protocol Builder
-        '202':
-          description: Request accepted, will perform an update. Study state set to "updating"
-
   /workflow-specification:
     get:
       operationId: crc.api.workflow.all_specifications
@@ -360,24 +357,12 @@ paths:
           description: The unique id of a workflow specification
           schema:
             type: string
-        - name: study_id
-          in: query
-          required: false
-          description: The unique id of a study
-          schema:
-            type: integer
         - name: workflow_id
           in: query
           required: false
           description: The unique id of a workflow instance
           schema:
             type: integer
-        - name: task_id
-          in: query
-          required: false
-          description: The unique id of a workflow task
-          schema:
-            type: string
         - name: form_field_key
           in: query
           required: false
@@ -473,6 +458,12 @@ paths:
           description: The id of the File requested
           schema:
             type: integer
+        - name: version
+          in: query
+          required: false
+          description: The version of the file, or none for latest version
+          schema:
+            type: integer
     get:
       operationId: crc.api.file.get_file_data
       summary: Returns only the file contents
@@ -681,7 +672,7 @@ paths:
           application/json:
             schema:
               $ref: "#/components/schemas/Workflow"
-  /workflow/{workflow_id}/task/{task_id}/lookup/{field_id}:
+  /workflow/{workflow_id}/lookup/{field_id}:
     parameters:
       - name: workflow_id
         in: path
@@ -690,13 +681,6 @@ paths:
         schema:
           type: integer
           format: int32
-      - name: task_id
-        in: path
-        required: true
-        description: The id of the task
-        schema:
-          type: string
-          format: uuid
       - name: field_id
         in: path
         required: true
@@ -755,13 +739,6 @@ paths:
           schema:
             type: string
   /render_docx:
-    parameters:
-      - name: data
-        in: query
-        required: true
-        description: The json data to use in populating the template
-        schema:
-          type: string
     put:
       operationId: crc.api.tools.render_docx
       security: []  # Disable security for this endpoint only.
@@ -777,6 +754,9 @@ paths:
               file:
                 type: string
                 format: binary
+              data:
+                type: string
+                format: json
       responses:
         '200':
          description: Returns the generated document.
@@ -802,6 +782,54 @@ paths:
                 type: array
                 items:
                   $ref: "#/components/schemas/Script"
+  /approval:
+    parameters:
+      - name: approver_uid
+        in: query
+        required: false
+        description: Restrict results to a given approver uid; we may restrict the use of this at some point.
+        schema:
+          type: string
+    get:
+      operationId: crc.api.approval.get_approvals
+      summary: Provides a list of workflow approvals
+      tags:
+        - Approvals
+      responses:
+        '200':
+          description: An array of approvals
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "#/components/schemas/Approval"
+  /approval/{approval_id}:
+    parameters:
+      - name: approval_id
+        in: path
+        required: true
+        description: The id of the approval in question.
+        schema:
+          type: integer
+          format: int32
+    put:
+      operationId: crc.api.approval.update_approval
+      summary: Updates an approval with the given parameters
+      tags:
+        - Approvals
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/Approval'
+      responses:
+        '200':
+          description: Approval updated successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Approval"
 components:
   securitySchemes:
     jwt:
@@ -1209,7 +1237,10 @@ components:
         readOnly: true
       task:
         $ref: "#/components/schemas/Task"
 
+  Approval:
+    properties:
+      id:
+        type: number
+        format: integer
+        example: 5
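A hedged sketch of exercising the new approval endpoints with requests (host, token, and ids are illustrative; auth follows the app's jwt scheme):

    import requests

    BASE = 'http://localhost:5000/v1.0'              # base_path set in crc/__init__.py
    headers = {'Authorization': 'Bearer <token>'}    # placeholder JWT

    # GET /approval, optionally filtered by approver.
    approvals = requests.get(BASE + '/approval',
                             params={'approver_uid': 'dhf8r'},
                             headers=headers).json()

    # PUT /approval/{approval_id} with an updated status and message.
    resp = requests.put(BASE + '/approval/5',
                        json={'id': 5, 'status': 'APPROVED', 'message': 'Looks good.'},
                        headers=headers)
    print(resp.status_code)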
crc/api/approval.py (new file)

@@ -0,0 +1,30 @@
+from crc import app, db, session
+
+from crc.api.common import ApiError, ApiErrorSchema
+from crc.models.approval import Approval, ApprovalModel, ApprovalSchema
+from crc.services.approval_service import ApprovalService
+
+
+def get_approvals(approver_uid=None):
+    if not approver_uid:
+        db_approvals = ApprovalService.get_all_approvals()
+    else:
+        db_approvals = ApprovalService.get_approvals_per_user(approver_uid)
+    approvals = [Approval.from_model(approval_model) for approval_model in db_approvals]
+    results = ApprovalSchema(many=True).dump(approvals)
+    return results
+
+
+def update_approval(approval_id, body):
+    if approval_id is None:
+        raise ApiError('unknown_approval', 'Please provide a valid Approval ID.')
+
+    approval_model = session.query(ApprovalModel).get(approval_id)
+    if approval_model is None:
+        raise ApiError('unknown_approval', 'The approval "' + str(approval_id) + '" is not recognized.')
+
+    approval: Approval = ApprovalSchema().load(body)
+    approval.update_model(approval_model)
+    session.commit()
+
+    result = ApprovalSchema().dump(approval)
+    return result
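The round trip in update_approval leans on the schema's post_load hook (see crc/models/approval.py below). A sketch with illustrative stand-ins:

    # approval_model stands in for the row fetched with
    # session.query(ApprovalModel).get(approval_id).
    body = {'id': 5, 'status': 'APPROVED', 'message': 'Signed off.'}
    approval = ApprovalSchema().load(body)   # post_load builds an Approval(**data)
    approval.update_model(approval_model)    # copies only status and message onto the row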
crc/api/file.py

@@ -1,51 +1,58 @@
 import io
 from typing import List
 
 import connexion
 from flask import send_file
 
 from crc import session
 from crc.api.common import ApiError
-from crc.models.file import FileModelSchema, FileModel, FileDataModel
+from crc.models.file import FileSchema, FileModel, File, FileModelSchema
 from crc.models.workflow import WorkflowSpecModel
 from crc.services.file_service import FileService
 
 
-def get_files(workflow_spec_id=None, study_id=None, workflow_id=None, task_id=None, form_field_key=None):
-    if all(v is None for v in [workflow_spec_id, study_id, workflow_id, task_id, form_field_key]):
-        raise ApiError('missing_parameter',
-                       'Please specify at least one of workflow_spec_id, study_id, '
-                       'workflow_id, and task_id for this file in the HTTP parameters')
-
-    results = FileService.get_files(workflow_spec_id, study_id, workflow_id, task_id, form_field_key)
-    return FileModelSchema(many=True).dump(results)
+def to_file_api(file_model):
+    """Converts a FileModel object to something we can return via the api."""
+    return File.from_models(file_model, FileService.get_file_data(file_model.id))
+
+
+def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None):
+    if all(v is None for v in [workflow_spec_id, workflow_id, form_field_key]):
+        raise ApiError('missing_parameter',
+                       'Please specify either a workflow_spec_id or a '
+                       'workflow_id with an optional form_field_key')
+
+    file_models = FileService.get_files(workflow_spec_id=workflow_spec_id,
+                                        workflow_id=workflow_id,
+                                        irb_doc_code=form_field_key)
+
+    files = (to_file_api(model) for model in file_models)
+    return FileSchema(many=True).dump(files)
 
 
 def get_reference_files():
     results = FileService.get_files(is_reference=True)
-    return FileModelSchema(many=True).dump(results)
+    files = (to_file_api(model) for model in results)
+    return FileSchema(many=True).dump(files)
 
 
-def add_file(workflow_spec_id=None, study_id=None, workflow_id=None, task_id=None, form_field_key=None):
-    all_none = all(v is None for v in [workflow_spec_id, study_id, workflow_id, task_id, form_field_key])
-    missing_some = (workflow_spec_id is None) and (None in [study_id, workflow_id, form_field_key])
-    if all_none or missing_some:
-        raise ApiError('missing_parameter',
-                       'Please specify either a workflow_spec_id or all 3 of study_id, '
-                       'workflow_id, and field_id for this file in the HTTP parameters')
-    if 'file' not in connexion.request.files:
-        raise ApiError('invalid_file',
-                       'Expected a file named "file" in the multipart form request')
-
+def add_file(workflow_spec_id=None, workflow_id=None, form_field_key=None):
     file = connexion.request.files['file']
-    if workflow_spec_id:
+    if workflow_id:
+        if form_field_key is None:
+            raise ApiError('invalid_workflow_file',
+                           'When adding a workflow related file, you must specify a form_field_key')
+        file_model = FileService.add_workflow_file(workflow_id=workflow_id, irb_doc_code=form_field_key,
+                                                   name=file.filename, content_type=file.content_type,
+                                                   binary_data=file.stream.read())
+    elif workflow_spec_id:
         workflow_spec = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
         file_model = FileService.add_workflow_spec_file(workflow_spec, file.filename, file.content_type,
                                                         file.stream.read())
     else:
-        file_model = FileService.add_form_field_file(study_id, workflow_id, task_id, form_field_key, file.filename,
-                                                     file.content_type, file.stream.read())
+        raise ApiError("invalid_file", "You must supply either a workflow spec id or a workflow_id and form_field_key.")
 
-    return FileModelSchema().dump(file_model)
+    return FileSchema().dump(to_file_api(file_model))
 
 
 def get_reference_file(name):
@@ -80,7 +87,7 @@ def set_reference_file(name):
     file_model = file_models[0]
     FileService.update_file(file_models[0], file.stream.read(), file.content_type)
 
-    return FileModelSchema().dump(file_model)
+    return FileSchema().dump(to_file_api(file_model))
 
 
 def update_file_data(file_id):
@@ -89,11 +96,11 @@ def update_file_data(file_id):
     if file_model is None:
         raise ApiError('no_such_file', 'The file id you provided does not exist')
     file_model = FileService.update_file(file_model, file.stream.read(), file.content_type)
-    return FileModelSchema().dump(file_model)
+    return FileSchema().dump(to_file_api(file_model))
 
 
-def get_file_data(file_id):
-    file_data = FileService.get_file_data(file_id)
+def get_file_data(file_id, version=None):
+    file_data = FileService.get_file_data(file_id, version)
     if file_data is None:
         raise ApiError('no_such_file', 'The file id you provided does not exist')
     return send_file(
@@ -101,7 +108,7 @@ def get_file_data(file_id):
         attachment_filename=file_data.file_model.name,
         mimetype=file_data.file_model.content_type,
         cache_timeout=-1,  # Don't cache these files on the browser.
-        last_modified=file_data.last_updated
+        last_modified=file_data.date_created
     )
 
 
@@ -109,7 +116,7 @@ def get_file_info(file_id):
     file_model = session.query(FileModel).filter_by(id=file_id).with_for_update().first()
     if file_model is None:
         raise ApiError('no_such_file', 'The file id you provided does not exist', status_code=404)
-    return FileModelSchema().dump(file_model)
+    return FileSchema().dump(to_file_api(file_model))
 
 
 def update_file_info(file_id, body):
@@ -124,7 +131,7 @@ def update_file_info(file_id, body):
     file_model = FileModelSchema().load(body, session=session)
     session.add(file_model)
     session.commit()
-    return FileModelSchema().dump(file_model)
+    return FileSchema().dump(to_file_api(file_model))
 
 
 def delete_file(file_id):
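A hedged sketch of a client upload against the new add_file signature; the route (/file), host, ids, and document code are assumptions for illustration. The multipart part must be named "file" (per connexion.request.files['file'] above), and a workflow-related upload needs a form_field_key:

    import requests

    BASE = 'http://localhost:5000/v1.0'
    headers = {'Authorization': 'Bearer <token>'}

    with open('protocol.docx', 'rb') as f:            # illustrative document
        resp = requests.post(BASE + '/file',          # assumed upload route
                             params={'workflow_id': 12,
                                     'form_field_key': 'Study_Protocol_Document'},
                             files={'file': f},
                             headers=headers)
    print(resp.json())  # dumped by FileSchema; includes latest_version / last_modified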
crc/api/study.py

@@ -1,25 +1,32 @@
-from typing import List
+from datetime import datetime
 
 from connexion import NoContent
 from flask import g
 from sqlalchemy.exc import IntegrityError
 
 from crc import session
 from crc.api.common import ApiError, ApiErrorSchema
-from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
+from crc.models.protocol_builder import ProtocolBuilderStatus
 from crc.models.study import StudySchema, StudyModel, Study
-from crc.services.protocol_builder import ProtocolBuilderService
 from crc.services.study_service import StudyService
 
 
 def add_study(body):
-    """This should never get called, and is subject to deprecation. Studies
-    should be added through the protocol builder only."""
-    study: Study = StudySchema().load(body)
-    study_model = StudyModel(**study.model_args())
+    """Or any study-like object. The body must include a title and a primary_investigator_id."""
+    if 'primary_investigator_id' not in body:
+        raise ApiError("missing_pi", "Can't create a new study without a Primary Investigator.")
+    if 'title' not in body:
+        raise ApiError("missing_title", "Can't create a new study without a title.")
+
+    study_model = StudyModel(user_uid=g.user.uid,
+                             title=body['title'],
+                             primary_investigator_id=body['primary_investigator_id'],
+                             last_updated=datetime.now(),
+                             protocol_builder_status=ProtocolBuilderStatus.ACTIVE)
+
     session.add(study_model)
-    errors = StudyService._add_all_workflow_specs_to_study(study)
+    errors = StudyService._add_all_workflow_specs_to_study(study_model)
     session.commit()
     study = StudyService().get_study(study_model.id)
     study_data = StudySchema().dump(study)
     study_data["errors"] = ApiErrorSchema(many=True).dump(errors)
     return study_data
@@ -43,7 +50,7 @@ def update_study(study_id, body):
 def get_study(study_id):
     study_service = StudyService()
     study = study_service.get_study(study_id)
-    if(study is None):
+    if (study is None):
         raise ApiError("Study not found", status_code=404)
     schema = StudySchema()
     return schema.dump(study)
@@ -58,31 +65,16 @@ def delete_study(study_id):
         raise ApiError(code="study_integrity_error", message=message)
 
 
-def all_studies():
-    """Returns all the studies associated with the current user. Assures we are
-    in sync with values read in from the protocol builder. """
-    StudyService.synch_all_studies_with_protocol_builder(g.user)
+def user_studies():
+    """Returns all the studies associated with the current user."""
+    StudyService.synch_with_protocol_builder_if_enabled(g.user)
     studies = StudyService.get_studies_for_user(g.user)
     results = StudySchema(many=True).dump(studies)
     return results
 
 
-def post_update_study_from_protocol_builder(study_id):
-    """Update a single study based on data received from
-    the protocol builder."""
-
-    db_study = session.query(StudyModel).filter_by(study_id=study_id).all()
-    pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(g.user.uid)
-    pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study_id), None)
-    if pb_study:
-        db_study.update_from_protocol_builder(pb_study)
-    else:
-        db_study.inactive = True
-        db_study.protocol_builder_status = ProtocolBuilderStatus.ABANDONED
-
-    return NoContent, 304
+def all_studies():
+    """Returns all studies (regardless of user) with submitted files."""
+    studies = StudyService.get_all_studies_with_files()
+    results = StudySchema(many=True).dump(studies)
+    return results
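A hedged sketch of creating a study through the reworked add_study (host and token are illustrative; the two required fields come straight from the checks above):

    import requests

    BASE = 'http://localhost:5000/v1.0'
    headers = {'Authorization': 'Bearer <token>'}

    body = {
        'title': 'A Study of Parallel Universes',   # missing -> ApiError 'missing_title'
        'primary_investigator_id': 'dhf8r',         # missing -> ApiError 'missing_pi'
    }
    study = requests.post(BASE + '/study', json=body, headers=headers).json()
    print(study.get('errors'))  # validation errors collected while attaching workflow specs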
crc/api/tools.py

@@ -25,13 +25,14 @@ def render_markdown(data, template):
         raise ApiError(code="invalid", message=str(e))
 
 
-def render_docx(data):
+def render_docx():
     """
     Provides a quick way to verify that a Jinja docx template will work properly on a given json
     data structure. Useful for folks that are building these templates.
     """
     try:
         file = connexion.request.files['file']
+        data = connexion.request.form['data']
         target_stream = CompleteTemplate().make_template(file, json.loads(data))
         return send_file(
             io.BytesIO(target_stream.read()),
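After this change the JSON payload for /render_docx travels as a multipart form field named "data" next to the "file" part, instead of a query parameter. A hedged client sketch (paths are illustrative):

    import json
    import requests

    with open('template.docx', 'rb') as f:
        resp = requests.put('http://localhost:5000/v1.0/render_docx',
                            files={'file': f},                           # the docx template
                            data={'data': json.dumps({'name': 'Dan'})})  # the template context
    with open('rendered.docx', 'wb') as out:
        out.write(resp.content)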
crc/api/user.py (112 lines changed)

@@ -1,12 +1,13 @@
 import json
 
 import connexion
-from flask import redirect, g
+import flask
+from flask import redirect, g, request
 
-from crc import sso, app, db
+from crc import app, db
 from crc.api.common import ApiError
 from crc.models.user import UserModel, UserModelSchema
-
+from crc.services.ldap_service import LdapService, LdapUserInfo
 
 """
 .. module:: crc.api.user
@@ -32,53 +33,76 @@ def verify_token(token):
 def get_current_user():
     return UserModelSchema().dump(g.user)
 
-
-@sso.login_handler
-def sso_login(user_info):
-    app.logger.info("Login from Shibboleth happening. " + json.dump(user_info))
-    # TODO: Get redirect URL from Shibboleth request header
-    _handle_login(user_info)
+@app.route('/v1.0/login')
+def sso_login():
+    # This is what I see coming back:
+    # X-Remote-Cn: Daniel Harold Funk (dhf8r)
+    # X-Remote-Sn: Funk
+    # X-Remote-Givenname: Daniel
+    # X-Remote-Uid: dhf8r
+    # Eppn: dhf8r@virginia.edu
+    # Cn: Daniel Harold Funk (dhf8r)
+    # Sn: Funk
+    # Givenname: Daniel
+    # Uid: dhf8r
+    # X-Remote-User: dhf8r@virginia.edu
+    # X-Forwarded-For: 128.143.0.10
+    # X-Forwarded-Host: dev.crconnect.uvadcos.io
+    # X-Forwarded-Server: dev.crconnect.uvadcos.io
+    # Connection: Keep-Alive
+    uid = request.headers.get("Uid")
+    if not uid:
+        uid = request.headers.get("X-Remote-Uid")
+
+    if not uid:
+        raise ApiError("invalid_sso_credentials", "Neither 'Uid' nor 'X-Remote-Uid' was present in the headers: %s"
+                       % str(request.headers))
+
+    redirect = request.args.get('redirect')
+    app.logger.info("SSO_LOGIN: Full URL: " + request.url)
+    app.logger.info("SSO_LOGIN: User Id: " + uid)
+    app.logger.info("SSO_LOGIN: Will try to redirect to: " + str(redirect))
+
+    ldap_service = LdapService()
+    info = ldap_service.user_info(uid)
+
+    return _handle_login(info, redirect)
+
+@app.route('/sso')
+def sso():
+    response = ""
+    response += "<h1>Headers</h1>"
+    response += "<ul>"
+    for k, v in request.headers:
+        response += "<li><b>%s</b> %s</li>\n" % (k, v)
+    response += "<h1>Environment</h1>"
+    for k, v in request.environ:
+        response += "<li><b>%s</b> %s</li>\n" % (k, v)
+    return response
 
 
-def _handle_login(user_info, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
+def _handle_login(user_info: LdapUserInfo, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
     """On successful login, adds user to database if the user is not already in the system,
     then returns the frontend auth callback URL, with auth token appended.
 
     Args:
-        user_info (dict of {
-            uid: str,
-            affiliation: Optional[str],
-            display_name: Optional[str],
-            email_address: Optional[str],
-            eppn: Optional[str],
-            first_name: Optional[str],
-            last_name: Optional[str],
-            title: Optional[str],
-        }): Dictionary of user attributes
+        user_info - an ldap user_info object.
         redirect_url: Optional[str]
 
     Returns:
         Response.  302 - Redirects to the frontend auth callback URL, with auth token appended.
     """
-    uid = user_info['uid']
-    user = db.session.query(UserModel).filter(UserModel.uid == uid).first()
+    user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
 
     if user is None:
         # Add new user
-        user = UserModelSchema().load(user_info, session=db.session)
-    else:
-        # Update existing user data
-        user = UserModelSchema().load(user_info, session=db.session, instance=user, partial=True)
+        user = UserModel()
 
-    # Build display_name if not set
-    if 'display_name' not in user_info or len(user_info['display_name']) == 0:
-        display_name_list = []
-
-        for prop in ['first_name', 'last_name']:
-            if prop in user_info and len(user_info[prop]) > 0:
-                display_name_list.append(user_info[prop])
-
-        user.display_name = ' '.join(display_name_list)
+    user.uid = user_info.uid
+    user.display_name = user_info.display_name
+    user.email_address = user_info.email_address
+    user.affiliation = user_info.affiliation
+    user.title = user_info.title
 
     db.session.add(user)
     db.session.commit()
@@ -86,10 +110,17 @@ def _handle_login(user_info, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
     # Return the frontend auth callback URL, with auth token appended.
     auth_token = user.encode_auth_token().decode()
     if redirect_url is not None:
-        return redirect('%s/%s' % (redirect_url, auth_token))
+        if redirect_url.find("http://") != 0 and redirect_url.find("https://") != 0:
+            redirect_url = "http://" + redirect_url
+        url = '%s?token=%s' % (redirect_url, auth_token)
+        app.logger.info("SSO_LOGIN: REDIRECTING TO: " + url)
+        return flask.redirect(url, code=302)
     else:
+        app.logger.info("SSO_LOGIN: NO REDIRECT, JUST RETURNING AUTH TOKEN.")
         return auth_token
 
 
 def backdoor(
     uid=None,
     affiliation=None,
@@ -99,7 +130,7 @@ def backdoor(
     first_name=None,
     last_name=None,
     title=None,
-    redirect_url=None,
+    redirect=None,
 ):
     """A backdoor for end-to-end system testing that allows the system to simulate logging in as a specific user.
     Only works if the application is running in a non-production environment.
@@ -122,11 +153,8 @@ def backdoor(
         ApiError.  If on production, returns a 404 error.
     """
     if not 'PRODUCTION' in app.config or not app.config['PRODUCTION']:
-        user_info = {}
-        for key in UserModel.__dict__.keys():
-            if key in connexion.request.args:
-                user_info[key] = connexion.request.args[key]
-
-        return _handle_login(user_info, redirect_url)
+        ldap_info = LdapService().user_info(uid)
+        return _handle_login(ldap_info, redirect)
     else:
         raise ApiError('404', 'unknown')
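The redirect handling in _handle_login is self-contained enough to test on its own. A sketch of the same logic as a pure function (the second URL is a made-up host):

    def build_callback_url(redirect_url: str, auth_token: str) -> str:
        # Prefix a scheme when the frontend passed a bare host, then
        # hand the token over as a query parameter, as _handle_login does.
        if redirect_url.find("http://") != 0 and redirect_url.find("https://") != 0:
            redirect_url = "http://" + redirect_url
        return '%s?token=%s' % (redirect_url, auth_token)

    assert build_callback_url('localhost:4200/session', 'abc') == \
        'http://localhost:4200/session?token=abc'
    assert build_callback_url('https://crconnect.example/session', 'abc') == \
        'https://crconnect.example/session?token=abc'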
crc/api/workflow.py

@@ -4,6 +4,7 @@ from crc import session
 from crc.api.common import ApiError, ApiErrorSchema
 from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
 from crc.models.file import FileModel, LookupDataSchema
+from crc.models.stats import TaskEventModel
 from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \
     WorkflowSpecCategoryModelSchema
 from crc.services.file_service import FileService
@@ -77,6 +78,8 @@ def delete_workflow_specification(spec_id):
     for file in files:
         FileService.delete_file(file.id)
 
+    session.query(TaskEventModel).filter(TaskEventModel.workflow_spec_id == spec_id).delete()
+
     # Delete all stats and workflow models related to this specification
     for workflow in session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id):
         StudyService.delete_workflow(workflow)
@@ -115,8 +118,8 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
         next_task=None,
         navigation=navigation,
         workflow_spec_id=processor.workflow_spec_id,
-        spec_version=processor.get_spec_version(),
-        is_latest_spec=processor.get_spec_version() == processor.get_latest_version_string(processor.workflow_spec_id),
+        spec_version=processor.get_version_string(),
+        is_latest_spec=processor.is_latest_spec,
         total_tasks=processor.workflow_model.total_tasks,
         completed_tasks=processor.workflow_model.completed_tasks,
         last_updated=processor.workflow_model.last_updated
@@ -216,26 +219,13 @@ def delete_workflow_spec_category(cat_id):
     session.commit()
 
 
-def lookup(workflow_id, task_id, field_id, query, limit):
+def lookup(workflow_id, field_id, query, limit):
     """
     Given a field in a task, attempts to find the lookup table or function associated
     with that field and runs a full-text query against it to locate the values and
     labels that would be returned to a type-ahead box.
     Tries to be fast, but first runs will be very slow.
     """
-    workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
-    if not workflow_model:
-        raise ApiError("unknown_workflow", "No workflow found with id: %i" % workflow_id)
-    processor = WorkflowProcessor(workflow_model)
-    task_id = uuid.UUID(task_id)
-    spiff_task = processor.bpmn_workflow.get_task(task_id)
-    if not spiff_task:
-        raise ApiError("unknown_task", "No task with %s found in workflow: %i" % (task_id, workflow_id))
-    field = None
-    for f in spiff_task.task_spec.form.fields:
-        if f.id == field_id:
-            field = f
-    if not field:
-        raise ApiError("unknown_field", "No field named %s in task %s" % (task_id, spiff_task.task_spec.name))
-
-    lookup_data = LookupService.lookup(spiff_task, field, query, limit)
+    workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
+    lookup_data = LookupService.lookup(workflow, field_id, query, limit)
     return LookupDataSchema(many=True).dump(lookup_data)
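With task_id gone from the route, a type-ahead client only needs the workflow and field ids. A hedged sketch (ids, field name, and host are illustrative; the path matches the new /workflow/{workflow_id}/lookup/{field_id} entry in api.yml):

    import requests

    BASE = 'http://localhost:5000/v1.0'
    headers = {'Authorization': 'Bearer <token>'}

    resp = requests.get(BASE + '/workflow/12/lookup/sponsor',
                        params={'query': 'uni', 'limit': 10},
                        headers=headers)
    for item in resp.json():        # LookupDataSchema rows
        print(item['value'], item['label'])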
crc/models/approval.py (new file)

@@ -0,0 +1,140 @@
+import enum
+
+import marshmallow
+from ldap3.core.exceptions import LDAPSocketOpenError
+from marshmallow import INCLUDE
+from sqlalchemy import func
+
+from crc import db, ma
+from crc.api.common import ApiError
+from crc.models.file import FileDataModel
+from crc.models.study import StudyModel
+from crc.models.workflow import WorkflowModel
+from crc.services.ldap_service import LdapService
+
+
+class ApprovalStatus(enum.Enum):
+    WAITING = "WAITING"  # no one has done jack.
+    APPROVED = "APPROVED"  # approved by the reviewer
+    DECLINED = "DECLINED"  # rejected by the reviewer
+    CANCELED = "CANCELED"  # The document was replaced with a new version and this review is no longer needed.
+
+
+class ApprovalFile(db.Model):
+    file_data_id = db.Column(db.Integer, db.ForeignKey(FileDataModel.id), primary_key=True)
+    approval_id = db.Column(db.Integer, db.ForeignKey("approval.id"), primary_key=True)
+
+    approval = db.relationship("ApprovalModel")
+    file_data = db.relationship(FileDataModel)
+
+
+class ApprovalModel(db.Model):
+    __tablename__ = 'approval'
+    id = db.Column(db.Integer, primary_key=True)
+    study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=False)
+    study = db.relationship(StudyModel, backref='approval', cascade='all,delete')
+    workflow_id = db.Column(db.Integer, db.ForeignKey(WorkflowModel.id), nullable=False)
+    workflow = db.relationship(WorkflowModel)
+    approver_uid = db.Column(db.String)  # Not linked to user model, as they may not have logged in yet.
+    status = db.Column(db.String)
+    message = db.Column(db.String, default='')
+    date_created = db.Column(db.DateTime(timezone=True), default=func.now())
+    version = db.Column(db.Integer)  # Incremented integer, so 1,2,3 as requests are made.
+    approval_files = db.relationship(ApprovalFile, back_populates="approval",
+                                     cascade="all, delete, delete-orphan",
+                                     order_by=ApprovalFile.file_data_id)
+
+
+class Approval(object):
+
+    def __init__(self, **kwargs):
+        self.__dict__.update(kwargs)
+
+    @classmethod
+    def from_model(cls, model: ApprovalModel):
+        # TODO: Reduce the code by iterating over model's dict keys
+        instance = cls()
+        instance.id = model.id
+        instance.study_id = model.study_id
+        instance.workflow_id = model.workflow_id
+        instance.version = model.version
+        instance.approver_uid = model.approver_uid
+        instance.status = model.status
+        instance.message = model.message
+        instance.date_created = model.date_created
+        instance.title = ''
+        if model.study:
+            instance.title = model.study.title
+
+        instance.approver = {}
+        try:
+            ldap_service = LdapService()
+            principal_investigator_id = model.study.primary_investigator_id
+            user_info = ldap_service.user_info(principal_investigator_id)
+        except (ApiError, LDAPSocketOpenError) as exception:
+            user_info = None
+            instance.approver['display_name'] = 'Primary Investigator details'
+            instance.approver['department'] = 'currently not available'
+
+        if user_info:
+            # TODO: Rename approver to primary investigator
+            instance.approver['uid'] = model.approver_uid
+            instance.approver['display_name'] = user_info.display_name
+            instance.approver['title'] = user_info.title
+            instance.approver['department'] = user_info.department
+
+        instance.associated_files = []
+        for approval_file in model.approval_files:
+            associated_file = {}
+            associated_file['id'] = approval_file.file_data.file_model.id
+            associated_file['name'] = approval_file.file_data.file_model.name
+            associated_file['content_type'] = approval_file.file_data.file_model.content_type
+            instance.associated_files.append(associated_file)
+
+        return instance
+
+    def update_model(self, approval_model: ApprovalModel):
+        approval_model.status = self.status
+        approval_model.message = self.message
+
+
+class ApprovalSchema(ma.Schema):
+    class Meta:
+        model = Approval
+        fields = ["id", "study_id", "workflow_id", "version", "title",
+                  "status", "message", "approver", "associated_files"]
+        unknown = INCLUDE
+
+    @marshmallow.post_load
+    def make_approval(self, data, **kwargs):
+        """Loads the basic approval data for updates to the database"""
+        return Approval(**data)
+
+# Carlos: Here is the data structure I was trying to imagine.
+# If I were to continue down my current train of thought, I'd create
+# another class called just "Approval" that can take an ApprovalModel from the
+# database and construct a data structure like this one, that can
+# be provided to the API at an /approvals endpoint with GET and PUT
+# dat = { "approvals": [
+#     {"id": 1,
+#      "study_id": 20,
+#      "workflow_id": 454,
+#      "study_title": "Dan Funk (dhf8r)",  # Really it's just the name of the Principal Investigator
+#      "workflow_version": "21",
+#      "approver": {  # Pulled from ldap
+#          "uid": "bgb22",
+#          "display_name": "Billy Bob (bgb22)",
+#          "title": "E42:He's a hoopy frood",
+#          "department": "E0:EN-Eng Study of Parallel Universes",
+#      },
+#      "files": [
+#          {
+#              "id": 124,
+#              "name": "ResearchRestart.docx",
+#              "content_type": "docx-something-whatever"
+#          }
+#      ]
+#     }
+#     ...
+# ]
crc/models/file.py

@@ -1,16 +1,18 @@
 import enum
+from typing import cast
 
 from marshmallow import INCLUDE, EXCLUDE
 from marshmallow_enum import EnumField
 from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
 from sqlalchemy import func, Index
 from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.orm import deferred
 
-from crc import db
+from crc import db, ma
 
 
 class FileType(enum.Enum):
-    bpmn = "bpmm"
+    bpmn = "bpmn"
     csv = 'csv'
     dmn = "dmn"
     doc = "doc"
@@ -55,15 +57,16 @@ CONTENT_TYPES = {
     "zip": "application/zip"
 }
 
+
 class FileDataModel(db.Model):
     __tablename__ = 'file_data'
     id = db.Column(db.Integer, primary_key=True)
     md5_hash = db.Column(UUID(as_uuid=True), unique=False, nullable=False)
-    data = db.Column(db.LargeBinary)
+    data = deferred(db.Column(db.LargeBinary))  # Don't load it unless you have to.
     version = db.Column(db.Integer, default=0)
-    last_updated = db.Column(db.DateTime(timezone=True), default=func.now())
+    date_created = db.Column(db.DateTime(timezone=True), default=func.now())
     file_model_id = db.Column(db.Integer, db.ForeignKey('file.id'))
-    file_model = db.relationship("FileModel")
+    file_model = db.relationship("FileModel", foreign_keys=[file_model_id])
 
 
 class FileModel(db.Model):
@@ -78,42 +81,69 @@ class FileModel(db.Model):
     primary_process_id = db.Column(db.String, nullable=True)  # An id in the xml of BPMN documents, critical for primary BPMN.
     workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True)
     workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
-    study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=True)
-    task_id = db.Column(db.String, nullable=True)
     irb_doc_code = db.Column(db.String, nullable=True)  # Code reference to the irb_documents.xlsx reference file.
-    form_field_key = db.Column(db.String, nullable=True)
-    latest_version = db.Column(db.Integer, default=0)
+
+
+class File(object):
+    @classmethod
+    def from_models(cls, model: FileModel, data_model: FileDataModel):
+        instance = cls()
+        instance.id = model.id
+        instance.name = model.name
+        instance.is_status = model.is_status
+        instance.is_reference = model.is_reference
+        instance.content_type = model.content_type
+        instance.primary = model.primary
+        instance.primary_process_id = model.primary_process_id
+        instance.workflow_spec_id = model.workflow_spec_id
+        instance.workflow_id = model.workflow_id
+        instance.irb_doc_code = model.irb_doc_code
+        instance.type = model.type
+        if data_model:
+            instance.last_modified = data_model.date_created
+            instance.latest_version = data_model.version
+        else:
+            instance.last_modified = None
+            instance.latest_version = None
+        return instance
+
 
 class FileModelSchema(SQLAlchemyAutoSchema):
     class Meta:
         model = FileModel
         load_instance = True
         include_relationships = True
         include_fk = True  # Includes foreign keys
         unknown = EXCLUDE
     type = EnumField(FileType)
 
 
+class FileSchema(ma.Schema):
+    class Meta:
+        model = File
+        fields = ["id", "name", "is_status", "is_reference", "content_type",
+                  "primary", "primary_process_id", "workflow_spec_id", "workflow_id",
+                  "irb_doc_code", "last_modified", "latest_version", "type"]
+        unknown = INCLUDE
+    type = EnumField(FileType)
+
+
 class LookupFileModel(db.Model):
-    """Takes the content of a file (like a xlsx, or csv file) and creates a key/value
-    store that can be used for lookups and searches. This table contains the metadata,
-    so we know the version of the file that was used, and what key column, and value column
-    were used to generate this lookup table. ie, the same xls file might have multiple
-    lookup file models, if different keys and labels are used - or someone decides to
-    make a change. We need to handle full text search over the label and value columns,
-    and not every column, because we don't know how much information will be in there. """
+    """Gives us a quick way to tell what kind of lookup is set on a form field.
+    Connected to the file data model, so that if a new version of the same file is
+    created, we can update the listing."""
+    # fixme: What happens if they change the file associated with a lookup field?
     __tablename__ = 'lookup_file'
     id = db.Column(db.Integer, primary_key=True)
     label_column = db.Column(db.String)
     value_column = db.Column(db.String)
     workflow_spec_id = db.Column(db.String)
+    field_id = db.Column(db.String)
+    is_ldap = db.Column(db.Boolean)  # Allows us to run an ldap query instead of a db lookup.
     file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id'))
+
+    dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan")
 
 class LookupDataModel(db.Model):
     __tablename__ = 'lookup_data'
     id = db.Column(db.Integer, primary_key=True)
     lookup_file_model_id = db.Column(db.Integer, db.ForeignKey('lookup_file.id'))
     lookup_file_model = db.relationship(LookupFileModel)
     value = db.Column(db.String)
     label = db.Column(db.String)
     # In the future, we might allow adding an additional "search" column if we want to search things not in label.
@@ -139,3 +169,9 @@ class LookupDataSchema(SQLAlchemyAutoSchema):
         include_relationships = False
         include_fk = False  # Includes foreign keys
 
+
+class SimpleFileSchema(ma.Schema):
+
+    class Meta:
+        model = FileModel
+        fields = ["name"]
@@ -5,6 +5,7 @@ from sqlalchemy import func
from crc import db, ma
from crc.api.common import ApiErrorSchema
from crc.models.file import FileModel, SimpleFileSchema
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \
    WorkflowModel

@@ -63,7 +64,7 @@ class WorkflowMetadata(object):
            name=workflow.workflow_spec.name,
            display_name=workflow.workflow_spec.display_name,
            description=workflow.workflow_spec.description,
            spec_version=workflow.spec_version,
            spec_version=workflow.spec_version(),
            category_id=workflow.workflow_spec.category_id,
            state=WorkflowState.optional,
            status=workflow.status,

@@ -102,7 +103,8 @@ class CategorySchema(ma.Schema):

class Study(object):

    def __init__(self, id, title, last_updated, primary_investigator_id, user_uid,
    def __init__(self, title, last_updated, primary_investigator_id, user_uid,
                 id=None,
                 protocol_builder_status=None,
                 sponsor="", hsr_number="", ind_number="", categories=[], **argsv):
        self.id = id

@@ -116,11 +118,12 @@ class Study(object):
        self.ind_number = ind_number
        self.categories = categories
        self.warnings = []
        self.files = []

    @classmethod
    def from_model(cls, study_model: StudyModel):
        args = {k: v for k, v in study_model.__dict__.items() if not k.startswith('_')}
        id = study_model.id  # Just read some value, in case the dict expired, otherwise dict may be empty.
        args = dict((k, v) for k, v in study_model.__dict__.items() if not k.startswith('_'))
        instance = cls(**args)
        return instance

@@ -139,10 +142,14 @@ class Study(object):

class StudySchema(ma.Schema):

    id = fields.Integer(required=False, allow_none=True)
    categories = fields.List(fields.Nested(CategorySchema), dump_only=True)
    warnings = fields.List(fields.Nested(ApiErrorSchema), dump_only=True)
    protocol_builder_status = EnumField(ProtocolBuilderStatus)
    hsr_number = fields.String(allow_none=True)
    sponsor = fields.String(allow_none=True)
    ind_number = fields.String(allow_none=True)
    files = fields.List(fields.Nested(SimpleFileSchema), dump_only=True)

    class Meta:
        model = Study

@@ -154,3 +161,4 @@ class StudySchema(ma.Schema):
    def make_study(self, data, **kwargs):
        """Can load the basic study data for updates to the database, but categories are write only"""
        return Study(**data)

@@ -19,6 +19,8 @@ class UserModel(db.Model):
    last_name = db.Column(db.String, nullable=True)
    title = db.Column(db.String, nullable=True)

    # Add Department and School

    def encode_auth_token(self):
        """

@@ -5,6 +5,7 @@ from marshmallow import EXCLUDE
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema

from crc import db
from crc.models.file import FileModel, FileDataModel


class WorkflowSpecCategoryModel(db.Model):

@@ -67,16 +68,30 @@ class WorkflowStatus(enum.Enum):
    complete = "complete"


class WorkflowSpecDependencyFile(db.Model):
    """Connects a workflow to the version of the specification files it depends on to execute"""
    file_data_id = db.Column(db.Integer, db.ForeignKey(FileDataModel.id), primary_key=True)
    workflow_id = db.Column(db.Integer, db.ForeignKey("workflow.id"), primary_key=True)

    file_data = db.relationship(FileDataModel)


class WorkflowModel(db.Model):
    __tablename__ = 'workflow'
    id = db.Column(db.Integer, primary_key=True)
    bpmn_workflow_json = db.Column(db.JSON)
    status = db.Column(db.Enum(WorkflowStatus))
    study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
    study = db.relationship("StudyModel", backref='workflow')
    workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
    workflow_spec = db.relationship("WorkflowSpecModel")
    spec_version = db.Column(db.String)
    total_tasks = db.Column(db.Integer, default=0)
    completed_tasks = db.Column(db.Integer, default=0)
    # task_history = db.Column(db.ARRAY(db.String), default=[])  # The history stack of user completed tasks.
    last_updated = db.Column(db.DateTime)
    # Order By is important for generating hashes on reviews.
    dependencies = db.relationship(WorkflowSpecDependencyFile, cascade="all, delete, delete-orphan",
                                   order_by="WorkflowSpecDependencyFile.file_data_id")

    def spec_version(self):
        dep_ids = list(dep.file_data_id for dep in self.dependencies)
        return "-".join(str(dep_id) for dep_id in dep_ids)

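The spec_version() method above replaces the old spec_version column: a workflow's version is now derived from the ordered file_data ids of its spec dependencies. A standalone sketch of that behavior (the stub class and ids below are invented for illustration, and the join is written per-id, which appears to be the intent of the original "-".join):

class DepStub:
    def __init__(self, file_data_id):
        self.file_data_id = file_data_id

def spec_version(dependencies):
    # Join the ordered file_data ids into a stable fingerprint, e.g. "12-15-19".
    dep_ids = list(dep.file_data_id for dep in dependencies)
    return "-".join(str(dep_id) for dep_id in dep_ids)

assert spec_version([DepStub(12), DepStub(15), DepStub(19)]) == "12-15-19"
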
@@ -27,24 +27,20 @@ Takes two arguments:
2. The 'code' of the IRB Document as set in the irb_documents.xlsx file."
"""

    def do_task_validate_only(self, task, study_id, *args, **kwargs):
    def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
        """For validation only, process the template, but do not store it in the database."""
        self.process_template(task, study_id, None, *args, **kwargs)

    def do_task(self, task, study_id, *args, **kwargs):
        workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
        final_document_stream = self.process_template(task, study_id, workflow, *args, **kwargs)
        file_name = args[0]
        irb_doc_code = args[1]
        FileService.add_task_file(study_id=study_id,
                                  workflow_id=workflow_id,
                                  workflow_spec_id=workflow.workflow_spec_id,
                                  task_id=task.id,
                                  name=file_name,
                                  content_type=CONTENT_TYPES['docx'],
                                  binary_data=final_document_stream.read(),
                                  irb_doc_code=irb_doc_code)
        FileService.add_workflow_file(workflow_id=workflow_id,
                                      name=file_name,
                                      content_type=CONTENT_TYPES['docx'],
                                      binary_data=final_document_stream.read(),
                                      irb_doc_code=irb_doc_code)

    def process_template(self, task, study_id, workflow=None, *args, **kwargs):
        """Entry point, mostly worried about wiring it all up."""

@@ -63,13 +59,13 @@ Takes two arguments:

        file_data_model = None
        if workflow is not None:
            # Get the workflow's latest files
            joined_file_data_models = WorkflowProcessor\
                .get_file_models_for_version(workflow.workflow_spec_id, workflow.spec_version)

            for joined_file_data in joined_file_data_models:
                if joined_file_data.file_model.name == file_name:
                    file_data_model = session.query(FileDataModel).filter_by(id=joined_file_data.id).first()
            # Get the workflow specification file with the given name.
            file_data_models = FileService.get_spec_data_files(
                workflow_spec_id=workflow.workflow_spec_id,
                workflow_id=workflow.id)
            for file_data in file_data_models:
                if file_data.file_model.name == file_name:
                    file_data_model = file_data

        if workflow is None or file_data_model is None:
            file_data_model = FileService.get_workflow_file_data(task.workflow, file_name)

@@ -20,10 +20,10 @@ class FactService(Script):
        response = requests.get('https://api.chucknorris.io/jokes/random')
        return response.json()['value']

    def do_task_validate_only(self, task, study_id, **kwargs):
        self.do_task(task, study_id, **kwargs)
    def do_task_validate_only(self, task, study_id, workflow_id, **kwargs):
        self.do_task(task, study_id, workflow_id, **kwargs)

    def do_task(self, task, study_id, **kwargs):
    def do_task(self, task, study_id, workflow_id, **kwargs):
        print(task.data)

        if "type" not in task.data:

@@ -0,0 +1,49 @@
from crc.api.common import ApiError
from crc.scripts.script import Script
from crc.services.approval_service import ApprovalService


class RequestApproval(Script):
    """This still needs to be fully wired up as a Script task callable from the workflow,
    but the basic logic is here just to get the tests passing and logic sound."""

    def get_description(self):
        return """
Creates an approval request on this workflow, by the given approver_uid(s).
Takes multiple arguments, which should point to data located in the current task
or be quoted strings.

Example:
RequestApproval approver1 "dhf8r"
"""

    def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
        self.get_uids(task, args)

    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        uids = self.get_uids(task, args)
        if isinstance(uids, str):
            ApprovalService.add_approval(study_id, workflow_id, args)
        elif isinstance(uids, list):
            for id in uids:
                ApprovalService.add_approval(study_id, workflow_id, id)

    def get_uids(self, task, args):
        if len(args) < 1:
            raise ApiError(code="missing_argument",
                           message="The RequestApproval script requires at least one argument: the "
                                   "name of the variable in the task data that contains the user "
                                   "id to process. Multiple arguments are accepted.")
        uids = []
        for arg in args:
            id = task.workflow.script_engine.evaluate_expression(task, arg)
            uids.append(id)
            if not isinstance(id, str):
                raise ApiError(code="invalid_argument",
                               message="The RequestApproval script requires 1 argument: the "
                                       "name of the variable in the task data that contains the user "
                                       "ids to process. This must point to an array or a string, but "
                                       "it currently points to a %s " % uids.__class__.__name__)

        return uids

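A rough sketch of the argument handling in get_uids above, with evaluate_expression simplified to a dict lookup; the task data and argument names are invented for illustration:

def get_uids(task_data, args):
    # Each argument is either a task-data variable name or a quoted literal.
    uids = []
    for arg in args:
        uids.append(task_data.get(arg, arg.strip('"')))
    return uids

assert get_uids({"approver1": "dhf8r"}, ["approver1", '"lb3dp"']) == ["dhf8r", "lb3dp"]
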
@@ -13,12 +13,12 @@ class Script(object):
        raise ApiError("invalid_script",
                       "This script does not supply a description.")

    def do_task(self, task, study_id, **kwargs):
    def do_task(self, task, study_id, workflow_id, **kwargs):
        raise ApiError("invalid_script",
                       "This is an internal error. The script you are trying to execute '%s' " % self.__class__.__name__ +
                       "does not properly implement the do_task function.")

    def do_task_validate_only(self, task, study_id, **kwargs):
    def do_task_validate_only(self, task, study_id, workflow_id, **kwargs):
        raise ApiError("invalid_script",
                       "This is an internal error. The script you are trying to execute '%s' " % self.__class__.__name__ +
                       "must provide a validate_only option that mimics the do_task, " +

@@ -138,7 +138,7 @@ Returns information specific to the protocol.
            documents_example=self.example_to_string("documents"),
        )

    def do_task_validate_only(self, task, study_id, *args, **kwargs):
    def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
        """For validation only, pretend no results come back from pb"""
        self.check_args(args)
        # Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.)

@@ -184,7 +184,7 @@ Returns information specific to the protocol.
        self.add_data_to_task(task=task, data=data["study"])
        self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)})

    def do_task(self, task, study_id, *args, **kwargs):
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        self.check_args(args)

        cmd = args[0]

@@ -0,0 +1,58 @@
import requests

from crc import db
from crc.api.common import ApiError
from crc.models.study import StudyModel
from crc.scripts.script import Script


class mock_study:
    def __init__(self):
        self.title = ""
        self.primary_investigator_id = ""


class UpdateStudy(Script):

    argument_error_message = "You must supply at least one argument to the " \
                             "update_study task, in the form [study_field]:[value]"

    def get_description(self):
        return """
Allows you to set specific attributes on the Study model by mapping them to
values in the task data. Should be called with the value to set (either title, or pi)
followed by a ":" and then the value to use in dot notation.

Example:
UpdateStudy title:PIComputingID.label pi:PIComputingID.value
"""
    def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
        study = mock_study()
        self.__update_study(task, study, *args)

    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        study = db.session.query(StudyModel).filter(StudyModel.id == study_id).first()
        self.__update_study(task, study, *args)
        db.session.add(study)

    def __update_study(self, task, study, *args):
        if len(args) < 1:
            raise ApiError.from_task("missing_argument", self.argument_error_message,
                                     task=task)

        for arg in args:
            try:
                field, value_lookup = arg.split(':')
            except:
                raise ApiError.from_task("invalid_argument", self.argument_error_message,
                                         task=task)

            value = task.workflow.script_engine.evaluate_expression(task, value_lookup)

            if field.lower() == "title":
                study.title = value
            elif field.lower() == "pi":
                study.primary_investigator_id = value
            else:
                raise ApiError.from_task("invalid_argument", self.argument_error_message,
                                         task=task)

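A sketch of the field:value parsing used above; the task-data shape (a PIComputingID entry with label/value keys) is assumed from the docstring example, and resolve() stands in for the script engine's dot-notation evaluation:

task_data = {"PIComputingID": {"label": "Dan Funk", "value": "dhf8r"}}

def resolve(data, dotted):
    # "PIComputingID.label" -> data["PIComputingID"]["label"]
    node = data
    for part in dotted.split('.'):
        node = node[part]
    return node

field, value_lookup = "title:PIComputingID.label".split(':')
assert field == "title" and resolve(task_data, value_lookup) == "Dan Funk"
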
@@ -0,0 +1,97 @@
from datetime import datetime

from sqlalchemy import desc

from crc import db, session
from crc.api.common import ApiError

from crc.models.approval import ApprovalModel, ApprovalStatus, ApprovalFile
from crc.models.workflow import WorkflowModel
from crc.services.file_service import FileService


class ApprovalService(object):
    """Provides common tools for working with an Approval"""

    @staticmethod
    def get_approvals_per_user(approver_uid):
        """Returns a list of all approvals for the given user (approver)"""
        db_approvals = session.query(ApprovalModel).filter_by(approver_uid=approver_uid).all()
        return db_approvals

    @staticmethod
    def get_all_approvals():
        """Returns a list of all approvals"""
        db_approvals = session.query(ApprovalModel).all()
        return db_approvals

    @staticmethod
    def update_approval(approval_id, approver_uid, status):
        """Update a specific approval"""
        db_approval = session.query(ApprovalModel).get(approval_id)
        if db_approval:
            db_approval.status = status
            session.add(db_approval)
            session.commit()
        # TODO: Log update action by approver_uid - maybe?
        return db_approval

    @staticmethod
    def add_approval(study_id, workflow_id, approver_uid):
        """We might have multiple approvals for a workflow, so I would expect this
        method to get called multiple times for the same workflow. This will
        only add a new approval if no approval already exists for the approver_uid,
        unless the workflow has changed, at which point, it will CANCEL any
        pending approvals and create a new approval for the latest version
        of the workflow."""

        # Find any existing approvals for this workflow and approver.
        latest_approval_request = db.session.query(ApprovalModel). \
            filter(ApprovalModel.workflow_id == workflow_id). \
            filter(ApprovalModel.approver_uid == approver_uid). \
            order_by(desc(ApprovalModel.version)).first()

        # Construct a hash of the latest files to see if things have changed since
        # the last approval.
        workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
        workflow_data_files = FileService.get_workflow_data_files(workflow_id)
        current_data_file_ids = list(data_file.id for data_file in workflow_data_files)

        if len(current_data_file_ids) == 0:
            raise ApiError("invalid_workflow_approval", "You can't create an approval for a workflow that has "
                                                        "no files to approve in it.")

        # If an existing approval request exists and no changes were made, do nothing.
        # If there is an existing approval request for a previous version of the workflow
        # then add a new request, and cancel any waiting/pending requests.
        if latest_approval_request:
            request_file_ids = list(file.file_data_id for file in latest_approval_request.approval_files)
            current_data_file_ids.sort()
            request_file_ids.sort()
            if current_data_file_ids == request_file_ids:
                return  # This approval already exists.
            else:
                latest_approval_request.status = ApprovalStatus.CANCELED.value
                db.session.add(latest_approval_request)
                version = latest_approval_request.version + 1
        else:
            version = 1

        model = ApprovalModel(study_id=study_id, workflow_id=workflow_id,
                              approver_uid=approver_uid, status=ApprovalStatus.WAITING.value,
                              message="", date_created=datetime.now(),
                              version=version)
        approval_files = ApprovalService._create_approval_files(workflow_data_files, model)
        db.session.add(model)
        db.session.add_all(approval_files)
        db.session.commit()

    @staticmethod
    def _create_approval_files(workflow_data_files, approval):
        """Currently based exclusively on the status of files associated with a workflow."""
        file_approval_models = []
        for file_data in workflow_data_files:
            file_approval_models.append(ApprovalFile(file_data_id=file_data.id,
                                                     approval=approval))
        return file_approval_models

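The change-detection rule in add_approval above reduces to comparing sorted lists of file_data ids. A minimal sketch (ids invented): if the sorted ids match, the existing approval still covers the workflow; otherwise it is canceled and re-issued at version + 1.

current_data_file_ids = [19, 12, 15]   # files attached to the workflow now
request_file_ids = [12, 15, 19]        # files captured by the latest approval
current_data_file_ids.sort()
request_file_ids.sort()
assert current_data_file_ids == request_file_ids  # nothing changed, keep the approval
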
@@ -5,13 +5,14 @@ from datetime import datetime
from uuid import UUID
from xml.etree import ElementTree

from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from pandas import ExcelFile
from sqlalchemy import desc

from crc import session
from crc.api.common import ApiError
from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowSpecModel
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile


class FileService(object):

@@ -40,22 +41,28 @@ class FileService(object):
        return code in df['code'].values

    @staticmethod
    def add_form_field_file(study_id, workflow_id, task_id, form_field_key, name, content_type, binary_data):
        """Create a new file and associate it with a user task form field within a workflow.
        Please note that the form_field_key MUST be a known file in the irb_documents.xlsx reference document."""
        if not FileService.is_allowed_document(form_field_key):
    def add_workflow_file(workflow_id, irb_doc_code, name, content_type, binary_data):
        """Create a new file and associate it with the workflow.
        Please note that the irb_doc_code MUST be a known file in the irb_documents.xlsx reference document."""
        if not FileService.is_allowed_document(irb_doc_code):
            raise ApiError("invalid_form_field_key",
                           "When uploading files, the form field id must match a known document in the "
                           "irb_documents.xlsx reference file. This code is not found in that file '%s'" % form_field_key)
                           "irb_documents.xlsx reference file. This code is not found in that file '%s'" % irb_doc_code)

        file_model = FileModel(
            study_id=study_id,
            workflow_id=workflow_id,
            task_id=task_id,
            name=name,
            form_field_key=form_field_key,
            irb_doc_code=form_field_key
        )
        """Assure this is unique to the workflow, task, and document code AND the name,
        because we will allow users to upload multiple files for the same form field
        in some cases."""
        file_model = session.query(FileModel)\
            .filter(FileModel.workflow_id == workflow_id)\
            .filter(FileModel.name == name)\
            .filter(FileModel.irb_doc_code == irb_doc_code).first()

        if not file_model:
            file_model = FileModel(
                workflow_id=workflow_id,
                name=name,
                irb_doc_code=irb_doc_code
            )
        return FileService.update_file(file_model, binary_data, content_type)

    @staticmethod

@@ -76,18 +83,10 @@ class FileService(object):
        return json.loads(df.to_json(orient='index'))

    @staticmethod
    def add_task_file(study_id, workflow_id, workflow_spec_id, task_id, name, content_type, binary_data,
                      irb_doc_code=None):
        """Create a new file and associate it with an executing task within a workflow."""
        file_model = FileModel(
            study_id=study_id,
            workflow_id=workflow_id,
            workflow_spec_id=workflow_spec_id,
            task_id=task_id,
            name=name,
            irb_doc_code=irb_doc_code
        )
        return FileService.update_file(file_model, binary_data, content_type)
    def get_workflow_files(workflow_id):
        """Returns all the file models associated with a running workflow."""
        return session.query(FileModel).filter(FileModel.workflow_id == workflow_id).\
            order_by(FileModel.id).all()

    @staticmethod
    def add_reference_file(name, content_type, binary_data):

@@ -112,12 +111,12 @@ class FileService(object):
    def update_file(file_model, binary_data, content_type):
        session.flush()  # Assure the database is up-to-date before running this.

        file_data_model = session.query(FileDataModel). \
            filter_by(file_model_id=file_model.id,
                      version=file_model.latest_version
                      ).with_for_update().first()
        latest_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            order_by(desc(FileDataModel.date_created)).first()

        md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
        if (file_data_model is not None) and (md5_checksum == file_data_model.md5_hash):
        if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
            # This file does not need to be updated, it's the same file.
            return file_model

@@ -131,22 +130,20 @@ class FileService(object):
        file_model.type = FileType[file_extension]
        file_model.content_type = content_type

        if file_data_model is None:
        if latest_data_model is None:
            version = 1
        else:
            version = file_data_model.version + 1
            version = latest_data_model.version + 1

        # If this is a BPMN, extract the process id.
        if file_model.type == FileType.bpmn:
            bpmn: ElementTree.Element = ElementTree.fromstring(binary_data)
            file_model.primary_process_id = WorkflowProcessor.get_process_id(bpmn)
            file_model.primary_process_id = FileService.get_process_id(bpmn)

        file_model.latest_version = version
        new_file_data_model = FileDataModel(
            data=binary_data, file_model_id=file_model.id, file_model=file_model,
            version=version, md5_hash=md5_checksum, last_updated=datetime.now()
            version=version, md5_hash=md5_checksum, date_created=datetime.now()
        )

        session.add_all([file_model, new_file_data_model])
        session.commit()
        session.flush()  # Assure the id is set on the model before returning it.

@@ -154,46 +151,103 @@ class FileService(object):
        return file_model

    @staticmethod
    def get_files(workflow_spec_id=None,
                  study_id=None, workflow_id=None, task_id=None, form_field_key=None,
    def get_process_id(et_root: ElementTree.Element):
        process_elements = []
        for child in et_root:
            if child.tag.endswith('process') and child.attrib.get('isExecutable', False):
                process_elements.append(child)

        if len(process_elements) == 0:
            raise ValidationException('No executable process tag found')

        # There are multiple root elements
        if len(process_elements) > 1:

            # Look for the element that has the startEvent in it
            for e in process_elements:
                this_element: ElementTree.Element = e
                for child_element in list(this_element):
                    if child_element.tag.endswith('startEvent'):
                        return this_element.attrib['id']

            raise ValidationException('No start event found in %s' % et_root.attrib['id'])

        return process_elements[0].attrib['id']

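A sketch of the executable-process detection in get_process_id above, run against a minimal invented BPMN document. Note that attrib.get('isExecutable', False) truth-tests the raw attribute string, so isExecutable="false" would still count as executable here:

from xml.etree import ElementTree

bpmn = ElementTree.fromstring(
    '<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">'
    '<process id="sample_process" isExecutable="true">'
    '<startEvent id="start"/>'
    '</process>'
    '</definitions>')

executable = [child for child in bpmn
              if child.tag.endswith('process') and child.attrib.get('isExecutable', False)]
assert executable[0].attrib['id'] == 'sample_process'
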
    @staticmethod
    def get_files_for_study(study_id, irb_doc_code=None):
        query = session.query(FileModel).\
            join(WorkflowModel).\
            filter(WorkflowModel.study_id == study_id)
        if irb_doc_code:
            query = query.filter(FileModel.irb_doc_code == irb_doc_code)
        return query.all()

    @staticmethod
    def get_files(workflow_spec_id=None, workflow_id=None,
                  name=None, is_reference=False, irb_doc_code=None):
        query = session.query(FileModel).filter_by(is_reference=is_reference)
        if workflow_spec_id:
            query = query.filter_by(workflow_spec_id=workflow_spec_id)
        if all(v is None for v in [study_id, workflow_id, task_id, form_field_key]):
            query = query.filter_by(
                study_id=None,
                workflow_id=None,
                task_id=None,
                form_field_key=None,
            )
        else:
            if study_id:
                query = query.filter_by(study_id=study_id)
            if workflow_id:
                query = query.filter_by(workflow_id=workflow_id)
            if task_id:
                query = query.filter_by(task_id=str(task_id))
            if form_field_key:
                query = query.filter_by(form_field_key=form_field_key)
        if name:
            query = query.filter_by(name=name)
        elif workflow_id:
            query = query.filter_by(workflow_id=workflow_id)
            if irb_doc_code:
                query = query.filter_by(irb_doc_code=irb_doc_code)
        elif is_reference:
            query = query.filter_by(is_reference=True)

        if name:
            query = query.filter_by(name=name)
        query = query.order_by(FileModel.id)

        results = query.all()
        return results

    @staticmethod
    def get_file_data(file_id, file_model=None):
        """Returns the file_data that is associated with the file model id; if an actual file_model
        is provided, it uses that rather than looking it up again."""
        if file_model is None:
            file_model = session.query(FileModel).filter(FileModel.id == file_id).first()
        return session.query(FileDataModel) \
            .filter(FileDataModel.file_model_id == file_id) \
            .filter(FileDataModel.version == file_model.latest_version) \
            .first()
    def get_spec_data_files(workflow_spec_id, workflow_id=None, name=None):
        """Returns all the FileDataModels related to a workflow specification.
        If a workflow is specified, returns the version of the spec related
        to that workflow; otherwise, returns the latest files."""
        if workflow_id:
            query = session.query(FileDataModel) \
                .join(WorkflowSpecDependencyFile) \
                .filter(WorkflowSpecDependencyFile.workflow_id == workflow_id) \
                .order_by(FileDataModel.id)
            if name:
                query = query.join(FileModel).filter(FileModel.name == name)
            return query.all()
        else:
            """Returns all the latest files related to a workflow specification"""
            file_models = FileService.get_files(workflow_spec_id=workflow_spec_id)
            latest_data_files = []
            for file_model in file_models:
                if name and file_model.name == name:
                    latest_data_files.append(FileService.get_file_data(file_model.id))
                elif not name:
                    latest_data_files.append(FileService.get_file_data(file_model.id))
            return latest_data_files

    @staticmethod
    def get_workflow_data_files(workflow_id=None):
        """Returns all the FileDataModels related to a running workflow -
        so these are the latest data files that were uploaded or generated
        that go along with this workflow. Not related to the spec in any way."""
        file_models = FileService.get_files(workflow_id=workflow_id)
        latest_data_files = []
        for file_model in file_models:
            latest_data_files.append(FileService.get_file_data(file_model.id))
        return latest_data_files

    @staticmethod
    def get_file_data(file_id: int, version: int = None):
        """Returns the file data with the given version, or the latest file, if version isn't provided."""
        query = session.query(FileDataModel) \
            .filter(FileDataModel.file_model_id == file_id)
        if version:
            query = query.filter(FileDataModel.version == version)
        else:
            query = query.order_by(desc(FileDataModel.date_created))
        return query.first()

    @staticmethod
    def get_reference_file_data(file_name):

@@ -202,7 +256,7 @@ class FileService(object):
            filter(FileModel.name == file_name).first()
        if not file_model:
            raise ApiError("file_not_found", "There is no reference file with the name '%s'" % file_name)
        return FileService.get_file_data(file_model.id, file_model)
        return FileService.get_file_data(file_model.id)

    @staticmethod
    def get_workflow_file_data(workflow, file_name):

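The reworked get_file_data above supports two retrieval modes. A sketch of the query shape, with session and model passed in rather than imported, since this fragment is illustrative only:

from sqlalchemy import desc

def file_data_query(session, FileDataModel, file_id, version=None):
    query = session.query(FileDataModel).filter(FileDataModel.file_model_id == file_id)
    if version:
        # Exact version requested.
        return query.filter(FileDataModel.version == version)
    # Otherwise newest first, so .first() yields the latest data.
    return query.order_by(desc(FileDataModel.date_created))
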
@@ -8,23 +8,37 @@ from crc.api.common import ApiError

class LdapUserInfo(object):

    def __init__(self, entry):
        self.display_name = entry.displayName.value
        self.given_name = ", ".join(entry.givenName)
        self.email = entry.mail.value
        self.telephone_number = ", ".join(entry.telephoneNumber)
        self.title = ", ".join(entry.title)
        self.department = ", ".join(entry.uvaDisplayDepartment)
        self.affiliation = ", ".join(entry.uvaPersonIAMAffiliation)
        self.sponsor_type = ", ".join(entry.uvaPersonSponsoredType)
        self.uid = entry.uid.value
    def __init__(self):
        self.display_name = ''
        self.given_name = ''
        self.email_address = ''
        self.telephone_number = ''
        self.title = ''
        self.department = ''
        self.affiliation = ''
        self.sponsor_type = ''
        self.uid = ''

    @classmethod
    def from_entry(cls, entry):
        instance = cls()
        instance.display_name = entry.displayName.value
        instance.given_name = ", ".join(entry.givenName)
        instance.email_address = entry.mail.value
        instance.telephone_number = ", ".join(entry.telephoneNumber)
        instance.title = ", ".join(entry.title)
        instance.department = ", ".join(entry.uvaDisplayDepartment)
        instance.affiliation = ", ".join(entry.uvaPersonIAMAffiliation)
        instance.sponsor_type = ", ".join(entry.uvaPersonSponsoredType)
        instance.uid = entry.uid.value
        return instance


class LdapService(object):
    search_base = "ou=People,o=University of Virginia,c=US"
    attributes = ['uid', 'cn', 'displayName', 'givenName', 'mail', 'objectClass', 'UvaDisplayDepartment',
    attributes = ['uid', 'cn', 'sn', 'displayName', 'givenName', 'mail', 'objectClass', 'UvaDisplayDepartment',
                  'telephoneNumber', 'title', 'uvaPersonIAMAffiliation', 'uvaPersonSponsoredType']
    uid_search_string = "(&(objectclass=person)(uid=%s))"
    user_or_last_name_search_string = "(&(objectclass=person)(|(uid=%s*)(sn=%s*)))"

    def __init__(self):
        if app.config['TESTING']:

@@ -50,10 +64,11 @@ class LdapService(object):
        if len(self.conn.entries) < 1:
            raise ApiError("missing_ldap_record", "Unable to locate a user with id %s in LDAP" % uva_uid)
        entry = self.conn.entries[0]
        return(LdapUserInfo(entry))
        return LdapUserInfo.from_entry(entry)

    def search_users(self, query, limit):
        search_string = LdapService.uid_search_string % query
        if len(query) < 3: return []
        search_string = LdapService.user_or_last_name_search_string % (query, query)
        self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)

        # Entries are returned as a generator, accessing entries

@@ -64,6 +79,6 @@ class LdapService(object):
        for entry in self.conn.entries:
            if count > limit:
                break
            results.append(LdapUserInfo(entry))
            results.append(LdapUserInfo.from_entry(entry))
            count += 1
        return results

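A sketch of how the broadened search filter above composes; "dhf" is an invented sample query matching either a uid prefix or a surname prefix:

user_or_last_name_search_string = "(&(objectclass=person)(|(uid=%s*)(sn=%s*)))"
query = "dhf"
assert (user_or_last_name_search_string % (query, query)
        == "(&(objectclass=person)(|(uid=dhf*)(sn=dhf*)))")
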
@@ -1,13 +1,24 @@
import logging
import re

from pandas import ExcelFile
from sqlalchemy import func, desc
from sqlalchemy.sql.functions import GenericFunction

from crc import db
from crc.api.common import ApiError
from crc.models.api_models import Task
from crc.models.file import FileDataModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowModel, WorkflowSpecDependencyFile
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.workflow_processor import WorkflowProcessor


class TSRank(GenericFunction):
    package = 'full_text'
    name = 'ts_rank'


class LookupService(object):

    """Provides tools for doing lookups for auto-complete fields.

@@ -24,33 +35,56 @@ class LookupService(object):
    """

    @staticmethod
    def lookup(spiff_task, field, query, limit):
        """Executes the lookup for the given field."""
        if field.type != Task.FIELD_TYPE_AUTO_COMPLETE:
            raise ApiError.from_task("invalid_field_type",
                                     "Field '%s' must be an autocomplete field to use lookups." % field.label,
                                     task=spiff_task)

        # If this field has an associated options file, then do the lookup against that field.
        if field.has_property(Task.PROP_OPTIONS_FILE):
            lookup_table = LookupService.get_lookup_table(spiff_task, field)
            return LookupService._run_lookup_query(lookup_table, query, limit)
        # If this is an LDAP lookup, use the LDAP service to provide the fields to return.
        elif field.has_property(Task.PROP_LDAP_LOOKUP):
            return LookupService._run_ldap_query(query, limit)
        else:
            raise ApiError.from_task("unknown_lookup_option",
                                     "Lookup supports using spreadsheet options or ldap options, and neither was "
                                     "provided.")
    def get_lookup_model(spiff_task, field):
        workflow_id = spiff_task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
        workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
        return LookupService.__get_lookup_model(workflow, field.id)

    @staticmethod
    def get_lookup_table(spiff_task, field):
        """ Checks to see if the options are provided in a separate lookup table associated with the
    def __get_lookup_model(workflow, field_id):
        lookup_model = db.session.query(LookupFileModel) \
            .filter(LookupFileModel.workflow_spec_id == workflow.workflow_spec_id) \
            .filter(LookupFileModel.field_id == field_id).first()

        # One more quick query, to see if the lookup file is still related to this workflow.
        # If not, we need to rebuild the lookup table.
        is_current = False
        if lookup_model:
            is_current = db.session.query(WorkflowSpecDependencyFile).\
                filter(WorkflowSpecDependencyFile.file_data_id == lookup_model.file_data_model_id).count()

        if not is_current:
            if lookup_model:
                db.session.delete(lookup_model)
            # Very, very, very expensive, but we don't need this till we do.
            lookup_model = LookupService.create_lookup_model(workflow, field_id)

        return lookup_model

    @staticmethod
    def lookup(workflow, field_id, query, limit):

        lookup_model = LookupService.__get_lookup_model(workflow, field_id)

        if lookup_model.is_ldap:
            return LookupService._run_ldap_query(query, limit)
        else:
            return LookupService._run_lookup_query(lookup_model, query, limit)

    @staticmethod
    def create_lookup_model(workflow_model, field_id):
        """
        This is all really expensive, but should happen just once (per file change).
        Checks to see if the options are provided in a separate lookup table associated with the
        workflow, and if so, assures that data exists in the database, and returns a model that can be used
        to locate that data.

        Returns: an array of LookupData, suitable for returning to the api.
        """
        processor = WorkflowProcessor(workflow_model)  # VERY expensive, ludicrous for lookup / type ahead
        spiff_task, field = processor.find_task_and_field_by_field_id(field_id)

        if field.has_property(Task.PROP_OPTIONS_FILE):
            if not field.has_property(Task.PROP_OPTIONS_VALUE_COLUMN) or \
                    not field.has_property(Task.PROP_OPTIONS_LABEL_COL):

@@ -65,67 +99,93 @@ class LookupService(object):
            file_name = field.get_property(Task.PROP_OPTIONS_FILE)
            value_column = field.get_property(Task.PROP_OPTIONS_VALUE_COLUMN)
            label_column = field.get_property(Task.PROP_OPTIONS_LABEL_COL)
            data_model = FileService.get_workflow_file_data(spiff_task.workflow, file_name)
            lookup_model = LookupService.get_lookup_table_from_data_model(data_model, value_column, label_column)
            return lookup_model
            latest_files = FileService.get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id,
                                                           workflow_id=workflow_model.id,
                                                           name=file_name)
            if len(latest_files) < 1:
                raise ApiError("missing_file", "Unable to locate the lookup data file '%s'" % file_name)
            else:
                data_model = latest_files[0]

            lookup_model = LookupService.build_lookup_table(data_model, value_column, label_column,
                                                            workflow_model.workflow_spec_id, field_id)

        elif field.has_property(Task.PROP_LDAP_LOOKUP):
            lookup_model = LookupFileModel(workflow_spec_id=workflow_model.workflow_spec_id,
                                           field_id=field_id,
                                           is_ldap=True)
        else:
            raise ApiError("unknown_lookup_option",
                           "Lookup supports using spreadsheet options or ldap options, and neither "
                           "was provided.")
        db.session.add(lookup_model)
        db.session.commit()
        return lookup_model

    @staticmethod
    def get_lookup_table_from_data_model(data_model: FileDataModel, value_column, label_column):
    def build_lookup_table(data_model: FileDataModel, value_column, label_column, workflow_spec_id, field_id):
        """ In some cases the lookup table can be very large. This method will add all values to the database
        in a way that can be searched and returned via an api call - rather than sending the full set of
        options along with the form. It will only open the file and process the options if something has
        changed. """
        xls = ExcelFile(data_model.data)
        df = xls.parse(xls.sheet_names[0])  # Currently we only look at the first sheet.
        if value_column not in df:
            raise ApiError("invalid_enum",
                           "The file %s does not contain a column named %s" % (data_model.file_model.name,
                                                                               value_column))
        if label_column not in df:
            raise ApiError("invalid_enum",
                           "The file %s does not contain a column named %s" % (data_model.file_model.name,
                                                                               label_column))

        lookup_model = db.session.query(LookupFileModel) \
            .filter(LookupFileModel.file_data_model_id == data_model.id) \
            .filter(LookupFileModel.value_column == value_column) \
            .filter(LookupFileModel.label_column == label_column).first()

        if not lookup_model:
            xls = ExcelFile(data_model.data)
            df = xls.parse(xls.sheet_names[0])  # Currently we only look at the first sheet.
            if value_column not in df:
                raise ApiError("invalid_enum",
                               "The file %s does not contain a column named %s" % (data_model.file_model.name,
                                                                                   value_column))
            if label_column not in df:
                raise ApiError("invalid_enum",
                               "The file %s does not contain a column named %s" % (data_model.file_model.name,
                                                                                   label_column))

            lookup_model = LookupFileModel(label_column=label_column, value_column=value_column,
                                           file_data_model_id=data_model.id)

            db.session.add(lookup_model)
            for index, row in df.iterrows():
                lookup_data = LookupDataModel(lookup_file_model=lookup_model,
                                              value=row[value_column],
                                              label=row[label_column],
                                              data=row.to_json())
                db.session.add(lookup_data)
            db.session.commit()
        lookup_model = LookupFileModel(workflow_spec_id=workflow_spec_id,
                                       field_id=field_id,
                                       file_data_model_id=data_model.id,
                                       is_ldap=False)

        db.session.add(lookup_model)
        for index, row in df.iterrows():
            lookup_data = LookupDataModel(lookup_file_model=lookup_model,
                                          value=row[value_column],
                                          label=row[label_column],
                                          data=row.to_json())
            db.session.add(lookup_data)
        db.session.commit()
        return lookup_model

    @staticmethod
    def _run_lookup_query(lookup_file_model, query, limit):
        db_query = LookupDataModel.query.filter(LookupDataModel.lookup_file_model == lookup_file_model)

        query = re.sub('[^A-Za-z0-9 ]+', '', query)
        print("Query: " + query)
        query = query.strip()
        if len(query) > 1:
        if len(query) > 0:
            if ' ' in query:
                terms = query.split(' ')
                new_terms = []
                new_terms = ["'%s'" % query]
                for t in terms:
                    new_terms.append(t + ":*")
                query = '|'.join(new_terms)
                    new_terms.append("%s:*" % t)
                new_query = ' | '.join(new_terms)
            else:
                query = "%s:*" % query
                db_query = db_query.filter(LookupDataModel.label.match(query))
                new_query = "%s:*" % query

            # db_query = db_query.filter(text("lookup_data.label @@ to_tsquery('simple', '%s')" % query))
            # Run the full text query
            db_query = db_query.filter(LookupDataModel.label.match(new_query))
            # But hackishly order by like, which does a good job of
            # pulling more relevant matches to the top.
            db_query = db_query.order_by(desc(LookupDataModel.label.like("%" + query + "%")))
            # ORDER BY name LIKE concat('%', ticker, '%') desc, rank DESC

        return db_query.limit(limit).all()
        # db_query = db_query.order_by(desc(func.full_text.ts_rank(
        #     func.to_tsvector(LookupDataModel.label),
        #     func.to_tsquery(query))))
        from sqlalchemy.dialects import postgresql
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
        result = db_query.limit(limit).all()
        logging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)
        return result

    @staticmethod
    def _run_ldap_query(query, limit):

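A sketch of the tsquery string built by _run_lookup_query above for a multi-word search: the exact phrase is OR-ed with a prefix match per word, after stripping non-alphanumeric characters. The sample input is invented:

import re

query = "Neuro Surgery!"
query = re.sub('[^A-Za-z0-9 ]+', '', query).strip()
if ' ' in query:
    new_terms = ["'%s'" % query]
    for t in query.split(' '):
        new_terms.append("%s:*" % t)
    new_query = ' | '.join(new_terms)
else:
    new_query = "%s:*" % query

assert new_query == "'Neuro Surgery' | Neuro:* | Surgery:*"
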
@@ -5,8 +5,7 @@ import requests

from crc import app
from crc.api.common import ApiError
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStudySchema, ProtocolBuilderInvestigator, \
    ProtocolBuilderRequiredDocument, ProtocolBuilderRequiredDocumentSchema
from crc.models.protocol_builder import ProtocolBuilderStudySchema, ProtocolBuilderRequiredDocument


class ProtocolBuilderService(object):

@@ -15,8 +14,16 @@ class ProtocolBuilderService(object):
    REQUIRED_DOCS_URL = app.config['PB_REQUIRED_DOCS_URL']
    STUDY_DETAILS_URL = app.config['PB_STUDY_DETAILS_URL']

    @staticmethod
    def is_enabled():
        if isinstance(app.config['PB_ENABLED'], str):
            return app.config['PB_ENABLED'].lower() == "true"
        else:
            return app.config['PB_ENABLED'] is True

    @staticmethod
    def get_studies(user_id) -> {}:
        ProtocolBuilderService.__enabled_or_raise()
        if not isinstance(user_id, str):
            raise ApiError("invalid_user_id", "This user id is invalid: " + str(user_id))
        response = requests.get(ProtocolBuilderService.STUDY_URL % user_id)

@@ -30,40 +37,31 @@ class ProtocolBuilderService(object):

    @staticmethod
    def get_investigators(study_id) -> {}:
        ProtocolBuilderService.check_args(study_id)
        response = requests.get(ProtocolBuilderService.INVESTIGATOR_URL % study_id)
        if response.ok and response.text:
            pb_studies = json.loads(response.text)
            return pb_studies
        else:
            raise ApiError("protocol_builder_error",
                           "Received an invalid response from the protocol builder (status %s): %s" %
                           (response.status_code, response.text))
        return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.INVESTIGATOR_URL)

    @staticmethod
    def get_required_docs(study_id) -> Optional[List[ProtocolBuilderRequiredDocument]]:
        ProtocolBuilderService.check_args(study_id)
        response = requests.get(ProtocolBuilderService.REQUIRED_DOCS_URL % study_id)
        return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.REQUIRED_DOCS_URL)

    @staticmethod
    def get_study_details(study_id) -> {}:
        return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.STUDY_DETAILS_URL)

    @staticmethod
    def __enabled_or_raise():
        if not ProtocolBuilderService.is_enabled():
            raise ApiError("protocol_builder_disabled", "The Protocol Builder Service is currently disabled.")

    @staticmethod
    def __make_request(study_id, url):
        ProtocolBuilderService.__enabled_or_raise()
        if not isinstance(study_id, int):
            raise ApiError("invalid_study_id", "This study id is invalid: " + str(study_id))
        response = requests.get(url % study_id)
        if response.ok and response.text:
            return json.loads(response.text)
        else:
            raise ApiError("protocol_builder_error",
                           "Received an invalid response from the protocol builder (status %s): %s" %
                           (response.status_code, response.text))

    @staticmethod
    def get_study_details(study_id) -> {}:
        ProtocolBuilderService.check_args(study_id)
        response = requests.get(ProtocolBuilderService.STUDY_DETAILS_URL % study_id)
        if response.ok and response.text:
            pb_study_details = json.loads(response.text)
            return pb_study_details
        else:
            raise ApiError("protocol_builder_error",
                           "Received an invalid response from the protocol builder (status %s): %s" %
                           (response.status_code, response.text))

    @staticmethod
    def check_args(study_id):
        if not isinstance(study_id, int):
            raise ApiError("invalid_study_id", "This study id is invalid: " + str(study_id))
                           "Received an invalid response from the protocol builder (status %s): %s when calling "
                           "url '%s'." %
                           (response.status_code, response.text, url))

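The new is_enabled() check above accepts PB_ENABLED either as a string (typically from an environment variable) or as a real boolean; only "true"/True enable the service. A standalone sketch:

def is_enabled(pb_enabled):
    if isinstance(pb_enabled, str):
        return pb_enabled.lower() == "true"
    return pb_enabled is True

assert is_enabled("True") and is_enabled(True)
assert not is_enabled("false") and not is_enabled(None)
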
@@ -32,6 +32,17 @@ class StudyService(object):
            studies.append(StudyService.get_study(study_model.id, study_model))
        return studies

    @staticmethod
    def get_all_studies_with_files():
        """Returns a list of all studies"""
        db_studies = session.query(StudyModel).all()
        studies = []
        for s in db_studies:
            study = Study.from_model(s)
            study.files = FileService.get_files_for_study(study.id)
            studies.append(study)
        return studies

    @staticmethod
    def get_study(study_id, study_model: StudyModel = None):
        """Returns a study model that contains all the workflows organized by category.

@@ -42,6 +53,7 @@ class StudyService(object):
        study = Study.from_model(study_model)
        study.categories = StudyService.get_categories()
        workflow_metas = StudyService.__get_workflow_metas(study_id)
        study.files = FileService.get_files_for_study(study.id)

        # Calling this line repeatedly is very very slow. It creates the
        # master spec and runs it.

@@ -58,16 +70,18 @@ class StudyService(object):
    def delete_study(study_id):
        session.query(TaskEventModel).filter_by(study_id=study_id).delete()
        for workflow in session.query(WorkflowModel).filter_by(study_id=study_id):
            StudyService.delete_workflow(workflow.id)
            StudyService.delete_workflow(workflow)
        session.query(StudyModel).filter_by(id=study_id).delete()
        session.commit()

    @staticmethod
    def delete_workflow(workflow_id):
        for file in session.query(FileModel).filter_by(workflow_id=workflow_id).all():
    def delete_workflow(workflow):
        for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
            FileService.delete_file(file.id)
        session.query(TaskEventModel).filter_by(workflow_id=workflow_id).delete()
        session.query(WorkflowModel).filter_by(id=workflow_id).delete()
        for dep in workflow.dependencies:
            session.delete(dep)
        session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
        session.query(WorkflowModel).filter_by(id=workflow.id).delete()

    @staticmethod
    def get_categories():

@@ -110,23 +124,29 @@ class StudyService(object):
        """Returns a list of documents related to the study, and any file information
        that is available."""

        # Get PB required docs
        try:
            pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
        except requests.exceptions.ConnectionError as ce:
            app.logger.error("Failed to connect to the Protocol Builder - %s" % str(ce))
        # Get PB required docs, if the Protocol Builder Service is enabled.
        if ProtocolBuilderService.is_enabled():
            try:
                pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
            except requests.exceptions.ConnectionError as ce:
                app.logger.error("Failed to connect to the Protocol Builder - %s" % str(ce))
                pb_docs = []
        else:
            pb_docs = []

        # Loop through all known document types, get the counts for those files, and use pb_docs to mark those required.
        # Loop through all known document types, get the counts for those files,
        # and use pb_docs to mark those as required.
        doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])

        documents = {}
        for code, doc in doc_dictionary.items():

            pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
            doc['required'] = False
            if pb_data:
                doc['required'] = True
            if ProtocolBuilderService.is_enabled():
                pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
                doc['required'] = False
                if pb_data:
                    doc['required'] = True

            doc['study_id'] = study_id
            doc['code'] = code

@@ -138,22 +158,19 @@ class StudyService(object):
            doc['display_name'] = ' / '.join(name_list)

            # For each file, get associated workflow status
            doc_files = FileService.get_files(study_id=study_id, irb_doc_code=code)
            doc_files = FileService.get_files_for_study(study_id=study_id, irb_doc_code=code)
            doc['count'] = len(doc_files)
            doc['files'] = []
            for file in doc_files:
                doc['files'].append({'file_id': file.id,
                                     'task_id': file.task_id,
                                     'workflow_id': file.workflow_id,
                                     'workflow_spec_id': file.workflow_spec_id})
                                     'workflow_id': file.workflow_id})

                # Update the document status to match the status of the workflow it is in.
                if not 'status' in doc or doc['status'] is None:
                if 'status' not in doc or doc['status'] is None:
                    workflow: WorkflowModel = session.query(WorkflowModel).filter_by(id=file.workflow_id).first()
                    doc['status'] = workflow.status.value

            documents[code] = doc

        return documents

@@ -201,34 +218,40 @@ class StudyService(object):

    @staticmethod
    def synch_all_studies_with_protocol_builder(user):
    def synch_with_protocol_builder_if_enabled(user):
        """Assures that the studies we have locally for the given user are
        in sync with the studies available in protocol builder. """
        # Get studies matching this user from Protocol Builder
        pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(user.uid)

        # Get studies from the database
        db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()
        if ProtocolBuilderService.is_enabled():

        # Update all studies from the protocol builder, create new studies as needed.
        # Further assures that every active study (that does exist in the protocol builder)
        # has a reference to every available workflow (though some may not have started yet)
        for pb_study in pb_studies:
            db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
            if not db_study:
                db_study = StudyModel(id=pb_study.STUDYID)
                session.add(db_study)
                db_studies.append(db_study)
            db_study.update_from_protocol_builder(pb_study)
            StudyService._add_all_workflow_specs_to_study(db_study)
            app.logger.info("The Protocol Builder is enabled. app.config['PB_ENABLED'] = " +
                            str(app.config['PB_ENABLED']))

        # Mark studies as inactive that are no longer in Protocol Builder
        for study in db_studies:
            pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
            if not pb_study:
                study.protocol_builder_status = ProtocolBuilderStatus.ABANDONED
            # Get studies matching this user from Protocol Builder
            pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(user.uid)

        db.session.commit()
            # Get studies from the database
            db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()

            # Update all studies from the protocol builder, create new studies as needed.
            # Further assures that every active study (that does exist in the protocol builder)
            # has a reference to every available workflow (though some may not have started yet)
            for pb_study in pb_studies:
                db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
                if not db_study:
                    db_study = StudyModel(id=pb_study.STUDYID)
                    session.add(db_study)
                    db_studies.append(db_study)
                db_study.update_from_protocol_builder(pb_study)
                StudyService._add_all_workflow_specs_to_study(db_study)

            # Mark studies as inactive that are no longer in Protocol Builder
            for study in db_studies:
                pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
                if not pb_study:
                    study.protocol_builder_status = ProtocolBuilderStatus.ABANDONED

            db.session.commit()

    @staticmethod
    def __update_status_of_workflow_meta(workflow_metas, status):

@@ -275,8 +298,8 @@ class StudyService(object):
        return WorkflowProcessor.run_master_spec(master_specs[0], study_model)

    @staticmethod
    def _add_all_workflow_specs_to_study(study):
        existing_models = session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).all()
    def _add_all_workflow_specs_to_study(study_model:StudyModel):
        existing_models = session.query(WorkflowModel).filter(WorkflowModel.study == study_model).all()
        existing_specs = list(m.workflow_spec_id for m in existing_models)
        new_specs = session.query(WorkflowSpecModel). \
            filter(WorkflowSpecModel.is_master_spec == False). \

@@ -285,15 +308,15 @@ class StudyService(object):
        errors = []
        for workflow_spec in new_specs:
            try:
                StudyService._create_workflow_model(study, workflow_spec)
                StudyService._create_workflow_model(study_model, workflow_spec)
            except WorkflowException as we:
                errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
        return errors

    @staticmethod
    def _create_workflow_model(study, spec):
    def _create_workflow_model(study: StudyModel, spec):
        workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
                                       study_id=study.id,
                                       study=study,
                                       workflow_spec_id=spec.id,
                                       last_updated=datetime.now())
        session.add(workflow_model)

@@ -1,8 +1,7 @@
import random
import re
import string
import xml.etree.ElementTree as ElementTree
from datetime import datetime
from typing import List

from SpiffWorkflow import Task as SpiffTask, WorkflowException
from SpiffWorkflow.bpmn.BpmnScriptEngine import BpmnScriptEngine

@@ -13,14 +12,15 @@ from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.exceptions import WorkflowTaskExecException
from SpiffWorkflow.operators import Operator
from SpiffWorkflow.specs import WorkflowSpec
from sqlalchemy import desc

from crc import session
from crc.api.common import ApiError
from crc.models.file import FileDataModel, FileModel, FileType
from crc.models.workflow import WorkflowStatus, WorkflowModel
from crc.models.workflow import WorkflowStatus, WorkflowModel, WorkflowSpecDependencyFile
from crc.scripts.script import Script
from crc.services.file_service import FileService


class CustomBpmnScriptEngine(BpmnScriptEngine):
@@ -48,6 +48,11 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
            mod = __import__(module_name, fromlist=[class_name])
            klass = getattr(mod, class_name)
            study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
            if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data:
                workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
            else:
                workflow_id = None

            if not isinstance(klass(), Script):
                raise ApiError.from_task("invalid_script",
                                         "This is an internal error. The script '%s:%s' you called " %

@@ -57,14 +62,22 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
            if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                """If this is running a validation, and not a normal process, then we want to
                mimic running the script, but not make any external calls or database changes."""
                klass().do_task_validate_only(task, study_id, *commands[1:])
                klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:])
            else:
                klass().do_task(task, study_id, *commands[1:])
                klass().do_task(task, study_id, workflow_id, *commands[1:])
        except ModuleNotFoundError:
            raise ApiError.from_task("invalid_script",
                                     "Unable to locate Script: '%s:%s'" % (module_name, class_name),
                                     task=task)

    def evaluate_expression(self, task, expression):
        """
        Evaluate the given expression, within the context of the given task and
        return the result.
        """
        exp, valid = self.validateExpression(expression)
        return self._eval(exp, **task.data)

    @staticmethod
    def camel_to_snake(camel):
        camel = camel.strip()

@@ -87,7 +100,7 @@ class WorkflowProcessor(object):
    STUDY_ID_KEY = "study_id"
    VALIDATION_PROCESS_KEY = "validate_only"

    def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False):
    def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False, validate_only=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model.
        If soft_reset is set to true, it will try to use the latest version of the workflow specification.
        If hard_reset is set to true, it will create a new Workflow, but embed the data from the last
@@ -95,18 +108,22 @@ class WorkflowProcessor(object):
        If neither flag is set, it will use the same version of the specification that was used to originally
        create the workflow model. """
        self.workflow_model = workflow_model
        orig_version = workflow_model.spec_version
        if soft_reset or workflow_model.spec_version is None:
            self.workflow_model.spec_version = WorkflowProcessor.get_latest_version_string(
                workflow_model.workflow_spec_id)

        spec = self.get_spec(workflow_model.workflow_spec_id, workflow_model.spec_version)
        if soft_reset or len(workflow_model.dependencies) == 0:
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id)
        else:
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id,
                workflow_id=workflow_model.id)

        spec = self.get_spec(self.spec_data_files, workflow_model.workflow_spec_id)
        self.workflow_spec_id = workflow_model.workflow_spec_id
        try:
            self.bpmn_workflow = self.__get_bpmn_workflow(workflow_model, spec)
            self.bpmn_workflow = self.__get_bpmn_workflow(workflow_model, spec, validate_only)
            self.bpmn_workflow.script_engine = self._script_engine

            if not self.WORKFLOW_ID_KEY in self.bpmn_workflow.data:
            if self.WORKFLOW_ID_KEY not in self.bpmn_workflow.data:
                if not workflow_model.id:
                    session.add(workflow_model)
                    session.commit()
@@ -119,71 +136,63 @@ class WorkflowProcessor(object):
                self.save()

        except KeyError as ke:
            if soft_reset:
                # Undo the soft-reset.
                workflow_model.spec_version = orig_version
            raise ApiError(code="unexpected_workflow_structure",
                           message="Failed to deserialize workflow"
                                   " '%s' version %s, due to a mis-placed or missing task '%s'" %
                                   (self.workflow_spec_id, workflow_model.spec_version, str(ke)) +
                                   " This is very likely due to a soft reset where there was a structural change.")
                                   (self.workflow_spec_id, self.get_version_string(), str(ke)) +
                                   " This is very likely due to a soft reset where there was a structural change.")
        if hard_reset:
            # Now that the spec is loaded, get the data and rebuild the bpmn with the new details
            workflow_model.spec_version = self.hard_reset()
            self.hard_reset()
            workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(self.bpmn_workflow)
            self.save()
        if soft_reset:
            self.save()

    def __get_bpmn_workflow(self, workflow_model: WorkflowModel, spec: WorkflowSpec):
        # set whether this is the latest spec file.
        if self.spec_data_files == FileService.get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id):
            self.is_latest_spec = True
        else:
            self.is_latest_spec = False

    def __get_bpmn_workflow(self, workflow_model: WorkflowModel, spec: WorkflowSpec, validate_only=False):
        if workflow_model.bpmn_workflow_json:
            bpmn_workflow = self._serializer.deserialize_workflow(workflow_model.bpmn_workflow_json, workflow_spec=spec)
        else:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only
            bpmn_workflow.do_engine_steps()
        return bpmn_workflow

    def save(self):
        """Saves the current state of this processor to the database """
        workflow_model = self.workflow_model
        workflow_model.bpmn_workflow_json = self.serialize()
        self.workflow_model.bpmn_workflow_json = self.serialize()
        complete_states = [SpiffTask.CANCELLED, SpiffTask.COMPLETED]
        tasks = list(self.get_all_user_tasks())
        workflow_model.status = self.get_status()
        workflow_model.total_tasks = len(tasks)
        workflow_model.completed_tasks = sum(1 for t in tasks if t.state in complete_states)
        workflow_model.last_updated = datetime.now()
        session.add(workflow_model)
        self.workflow_model.status = self.get_status()
        self.workflow_model.total_tasks = len(tasks)
        self.workflow_model.completed_tasks = sum(1 for t in tasks if t.state in complete_states)
        self.workflow_model.last_updated = datetime.now()
        self.update_dependencies(self.spec_data_files)
        session.add(self.workflow_model)
        session.commit()

    @staticmethod
    def run_master_spec(spec_model, study):
        """Executes a BPMN specification for the given study, without recording any information to the database
        Useful for running the master specification, which should not persist. """
        version = WorkflowProcessor.get_latest_version_string(spec_model.id)
        spec = WorkflowProcessor.get_spec(spec_model.id, version)
        try:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=WorkflowProcessor._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            bpmn_workflow.do_engine_steps()
        except WorkflowException as we:
            raise ApiError.from_task_spec("error_running_master_spec", str(we), we.sender)

        if not bpmn_workflow.is_completed():
            raise ApiError("master_spec_not_automatic",
                           "The master spec should only contain fully automated tasks, it failed to complete.")

        return bpmn_workflow.last_task.data
    def get_version_string(self):
        # this could potentially become expensive to load all the data in the data models.
        # in which case we might consider using a deferred loader for the actual data, but
        # trying not to pre-optimize.
        file_data_models = FileService.get_spec_data_files(self.workflow_model.workflow_spec_id,
                                                           self.workflow_model.id)
        return WorkflowProcessor.__get_version_string_for_data_models(file_data_models)

    @staticmethod
    def get_parser():
        parser = MyCustomParser()
        return parser
    def get_latest_version_string_for_spec(spec_id):
        file_data_models = FileService.get_spec_data_files(spec_id)
        return WorkflowProcessor.__get_version_string_for_data_models(file_data_models)

    @staticmethod
    def get_latest_version_string(workflow_spec_id):
    def __get_version_string_for_data_models(file_data_models):
        """Version is in the format v[VERSION] (FILE_ID_LIST)
        For example, a single bpmn file with only one version would be
        v1 (12) Where 12 is the id of the file data model that is used to create the
@@ -192,10 +201,6 @@ class WorkflowProcessor(object):
        a Spec that includes a BPMN, DMN, and a Word file all on the first
        version would be v1.1.1 (12.45.21)"""

        # this could potentially become expensive to load all the data in the data models.
        # in which case we might consider using a deferred loader for the actual data, but
        # trying not to pre-optimize.
        file_data_models = WorkflowProcessor.__get_latest_file_models(workflow_spec_id)
        major_version = 0  # The version of the primary file.
        minor_version = []  # The versions of the minor files if any.
        file_ids = []
@@ -211,103 +216,78 @@ class WorkflowProcessor(object):
        full_version = "v%s (%s)" % (version, files)
        return full_version

    @staticmethod
    def get_file_models_for_version(workflow_spec_id, version):
        file_id_strings = re.findall('\((.*)\)', version)[0].split(".")
        file_ids = [int(i) for i in file_id_strings]
        files = session.query(FileDataModel)\
            .join(FileModel) \
            .filter(FileModel.workflow_spec_id == workflow_spec_id)\
            .filter(FileDataModel.id.in_(file_ids)).all()
        if len(files) != len(file_ids):
            raise ApiError("invalid_version",
                           "The version '%s' of workflow specification '%s' is invalid. " %
                           (version, workflow_spec_id) +
                           " Unable to locate the correct files to recreate it.")
        return files


    def update_dependencies(self, spec_data_files):
        existing_dependencies = FileService.get_spec_data_files(
            workflow_spec_id=self.workflow_model.workflow_spec_id,
            workflow_id=self.workflow_model.id)

        # Don't save the dependencies if they haven't changed.
        if existing_dependencies == spec_data_files:
            return

        # Remove all existing dependencies, and replace them.
        self.workflow_model.dependencies = []
        for file_data in spec_data_files:
            self.workflow_model.dependencies.append(WorkflowSpecDependencyFile(file_data_id=file_data.id))

    @staticmethod
    def __get_latest_file_models(workflow_spec_id):
        """Returns all the latest files related to a workflow specification"""
        return session.query(FileDataModel) \
            .join(FileModel) \
            .filter(FileModel.workflow_spec_id == workflow_spec_id)\
            .filter(FileDataModel.version == FileModel.latest_version)\
            .order_by(FileModel.id)\
            .all()
    def run_master_spec(spec_model, study):
        """Executes a BPMN specification for the given study, without recording any information to the database
        Useful for running the master specification, which should not persist. """
        spec_data_files = FileService.get_spec_data_files(spec_model.id)
        spec = WorkflowProcessor.get_spec(spec_data_files, spec_model.id)
        try:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=WorkflowProcessor._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            bpmn_workflow.do_engine_steps()
        except WorkflowException as we:
            raise ApiError.from_task_spec("error_running_master_spec", str(we), we.sender)

        if not bpmn_workflow.is_completed():
            raise ApiError("master_spec_not_automatic",
                           "The master spec should only contain fully automated tasks, it failed to complete.")

        return bpmn_workflow.last_task.data

    @staticmethod
    def get_spec(workflow_spec_id, version=None):
        """Returns the requested version of the specification,
        or the latest version if none is specified."""
    def get_parser():
        parser = MyCustomParser()
        return parser

    @staticmethod
    def get_spec(file_data_models: List[FileDataModel], workflow_spec_id):
        """Returns a SpiffWorkflow specification for the given workflow spec,
        using the files provided. The Workflow_spec_id is only used to generate
        better error messages."""
        parser = WorkflowProcessor.get_parser()
        process_id = None

        if version is None:
            file_data_models = WorkflowProcessor.__get_latest_file_models(workflow_spec_id)
        else:
            file_data_models = WorkflowProcessor.get_file_models_for_version(workflow_spec_id, version)

        for file_data in file_data_models:
            if file_data.file_model.type == FileType.bpmn:
                bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
                if file_data.file_model.primary:
                    process_id = WorkflowProcessor.get_process_id(bpmn)
                    process_id = FileService.get_process_id(bpmn)
                parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
            elif file_data.file_model.type == FileType.dmn:
                dmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
                parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
        if process_id is None:
            raise(ApiError(code="no_primary_bpmn_error",
                           message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id))
            raise (ApiError(code="no_primary_bpmn_error",
                            message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id))
        try:
            spec = parser.get_spec(process_id)
        except ValidationException as ve:
            raise ApiError(code="workflow_validation_error",
                           message="Failed to parse Workflow Specification '%s' %s." % (workflow_spec_id, version) +
                           message="Failed to parse Workflow Specification '%s'" % workflow_spec_id +
                                   "Error is %s" % str(ve),
                           file_name=ve.filename,
                           task_id=ve.id,
                           tag=ve.tag)
        return spec


    @staticmethod
    def populate_form_with_random_data(task, task_api):
        """populates a task with random data - useful for testing a spec."""

        if not hasattr(task.task_spec, 'form'): return

        form_data = {}
        for field in task_api.form.fields:
            if field.type == "enum":
                if len(field.options) > 0:
                    form_data[field.id] = random.choice(field.options)
                else:
                    raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
                                                             " with no options" % field.id,
                                             task)
            elif field.type == "long":
                form_data[field.id] = random.randint(1, 1000)
            elif field.type == 'boolean':
                form_data[field.id] = random.choice([True, False])
            elif field.type == 'file':
                form_data[field.id] = random.randint(1, 100)
            elif field.type == 'files':
                form_data[field.id] = random.randrange(1, 100)
            else:
                form_data[field.id] = WorkflowProcessor._random_string()
        if task.data is None:
            task.data = {}
        task.data.update(form_data)

    @staticmethod
    def _random_string(string_length=10):
        """Generate a random string of fixed length """
        letters = string.ascii_lowercase
        return ''.join(random.choice(letters) for i in range(string_length))

    @staticmethod
    def status_of(bpmn_workflow):
        if bpmn_workflow.is_completed():
@@ -325,8 +305,8 @@ class WorkflowProcessor(object):

        Returns the new version.
        """
        version = WorkflowProcessor.get_latest_version_string(self.workflow_spec_id)
        spec = WorkflowProcessor.get_spec(self.workflow_spec_id)  # Force latest version by NOT specifying version
        self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id)
        spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
        # spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
        bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
        bpmn_workflow.data = self.bpmn_workflow.data
@@ -334,14 +314,10 @@ class WorkflowProcessor(object):
            task.data = self.bpmn_workflow.last_task.data
        bpmn_workflow.do_engine_steps()
        self.bpmn_workflow = bpmn_workflow
        return version

    def get_status(self):
        return self.status_of(self.bpmn_workflow)

    def get_spec_version(self):
        return self.workflow_model.spec_version

    def do_engine_steps(self):
        try:
            self.bpmn_workflow.do_engine_steps()
@@ -422,32 +398,18 @@ class WorkflowProcessor(object):
        return [t for t in all_tasks
                if not self.bpmn_workflow._is_engine_task(t.task_spec) and t.state in [t.COMPLETED, t.CANCELLED]]

    @staticmethod
    def get_process_id(et_root: ElementTree.Element):
        process_elements = []
        for child in et_root:
            if child.tag.endswith('process') and child.attrib.get('isExecutable', False):
                process_elements.append(child)

        if len(process_elements) == 0:
            raise ValidationException('No executable process tag found')

        # There are multiple root elements
        if len(process_elements) > 1:

            # Look for the element that has the startEvent in it
            for e in process_elements:
                this_element: ElementTree.Element = e
                for child_element in list(this_element):
                    if child_element.tag.endswith('startEvent'):
                        return this_element.attrib['id']

            raise ValidationException('No start event found in %s' % et_root.attrib['id'])

        return process_elements[0].attrib['id']

    def get_nav_item(self, task):
        for nav_item in self.bpmn_workflow.get_nav_list():
            if nav_item['task_id'] == task.id:
                return nav_item

    def find_task_and_field_by_field_id(self, field_id):
        """Tracks down a form field by name in the workflow spec,
        only looks at ready tasks. Returns a tuple of the task, and form"""
        for spiff_task in self.bpmn_workflow.get_tasks():
            if hasattr(spiff_task.task_spec, "form"):
                for field in spiff_task.task_spec.form.fields:
                    if field.id == field_id:
                        return spiff_task, field
        raise ApiError("invalid_field",
                       "Unable to find a task in the workflow with a lookup field called: %s" % field_id)

@@ -1,4 +1,6 @@
import string
from datetime import datetime
import random

import jinja2
from SpiffWorkflow import Task as SpiffTask, WorkflowException

@@ -15,9 +17,14 @@ from crc import db, app
from crc.api.common import ApiError
from crc.models.api_models import Task, MultiInstanceType
from crc.models.file import LookupDataModel
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel, WorkflowStatus
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine


@@ -34,30 +41,127 @@ class WorkflowService(object):
    But for now, this contains tools for converting spiff-workflow models into our
    own API models with additional information and capabilities."""

    @classmethod
    def test_spec(cls, spec_id):
    @staticmethod
    def make_test_workflow(spec_id):
        user = db.session.query(UserModel).filter_by(uid="test").first()
        if not user:
            db.session.add(UserModel(uid="test"))
        study = db.session.query(StudyModel).filter_by(user_uid="test").first()
        if not study:
            db.session.add(StudyModel(user_uid="test", title="test"))
        db.session.commit()
        workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
                                       workflow_spec_id=spec_id,
                                       last_updated=datetime.now(),
                                       study=study)
        return workflow_model

    @staticmethod
    def delete_test_data():
        for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
            StudyService.delete_study(study.id)
            db.session.commit()
        db.session.query(UserModel).filter_by(uid="test").delete()

    @staticmethod
    def test_spec(spec_id):
        """Runs a spec through its paces to see if it results in any errors. Not fool-proof, but a good
        sanity check."""
        version = WorkflowProcessor.get_latest_version_string(spec_id)
        spec = WorkflowProcessor.get_spec(spec_id, version)
        bpmn_workflow = BpmnWorkflow(spec, script_engine=CustomBpmnScriptEngine())
        bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = 1
        bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = spec_id
        bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = True

        while not bpmn_workflow.is_completed():
        workflow_model = WorkflowService.make_test_workflow(spec_id)

        try:
            processor = WorkflowProcessor(workflow_model, validate_only=True)
        except WorkflowException as we:
            WorkflowService.delete_test_data()
            raise ApiError.from_task_spec("workflow_execution_exception", str(we),
                                          we.sender)

        while not processor.bpmn_workflow.is_completed():
            try:
                bpmn_workflow.do_engine_steps()
                tasks = bpmn_workflow.get_tasks(SpiffTask.READY)
                processor.bpmn_workflow.do_engine_steps()
                tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
                for task in tasks:
                    task_api = WorkflowService.spiff_task_to_api_task(
                        task,
                        add_docs_and_forms=True)  # Assure we try to process the documentation, and raise those errors.
                    WorkflowProcessor.populate_form_with_random_data(task, task_api)
                    WorkflowService.populate_form_with_random_data(task, task_api)
                    task.complete()
            except WorkflowException as we:
                WorkflowService.delete_test_data()
                raise ApiError.from_task_spec("workflow_execution_exception", str(we),
                                              we.sender)
        WorkflowService.delete_test_data()

    @staticmethod
    def populate_form_with_random_data(task, task_api):
        """populates a task with random data - useful for testing a spec."""

        if not hasattr(task.task_spec, 'form'): return

        form_data = {}
        for field in task_api.form.fields:
            if field.type == "enum":
                if len(field.options) > 0:
                    random_choice = random.choice(field.options)
                    if isinstance(random_choice, dict):
                        form_data[field.id] = random.choice(field.options)['id']
                    else:
                        # fixme: why is it sometimes an EnumFormFieldOption, and other times not?
                        form_data[field.id] = random_choice.id  # Assume it is an EnumFormFieldOption
                else:
                    raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
                                                             " with no options" % field.id,
                                             task)
            elif field.type == "autocomplete":
                lookup_model = LookupService.get_lookup_model(task, field)
                if field.has_property(Task.PROP_LDAP_LOOKUP):
                    form_data[field.id] = {
                        "label": "dhf8r",
                        "value": "Dan Funk",
                        "data": {
                            "uid": "dhf8r",
                            "display_name": "Dan Funk",
                            "given_name": "Dan",
                            "email_address": "dhf8r@virginia.edu",
                            "department": "Depertment of Psychocosmographictology",
                            "affiliation": "Rousabout",
                            "sponsor_type": "Staff"
                        }
                    }
                elif lookup_model:
                    data = db.session.query(LookupDataModel).filter(
                        LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
                    options = []
                    for d in data:
                        options.append({"id": d.value, "name": d.label})
                    form_data[field.id] = random.choice(options)
                else:
                    raise ApiError.from_task("invalid_autocomplete", "The settings for this auto complete field "
                                                                     "are incorrect: %s " % field.id, task)
            elif field.type == "long":
                form_data[field.id] = random.randint(1, 1000)
            elif field.type == 'boolean':
                form_data[field.id] = random.choice([True, False])
            elif field.type == 'file':
                form_data[field.id] = random.randint(1, 100)
            elif field.type == 'files':
                form_data[field.id] = random.randrange(1, 100)
            else:
                form_data[field.id] = WorkflowService._random_string()
        if task.data is None:
            task.data = {}
        task.data.update(form_data)

    def __get_options(self):
        pass


    @staticmethod
    def _random_string(string_length=10):
        """Generate a random string of fixed length """
        letters = string.ascii_lowercase
        return ''.join(random.choice(letters) for i in range(string_length))

    @staticmethod
    def spiff_task_to_api_task(spiff_task, add_docs_and_forms=False):
@@ -176,12 +280,12 @@ class WorkflowService(object):

    @staticmethod
    def process_options(spiff_task, field):
        lookup_model = LookupService.get_lookup_table(spiff_task, field)

        # If this is an auto-complete field, do not populate options, a lookup will happen later.
        if field.type == Task.FIELD_TYPE_AUTO_COMPLETE:
            pass
        else:
        elif field.has_property(Task.PROP_OPTIONS_FILE):
            lookup_model = LookupService.get_lookup_model(spiff_task, field)
            data = db.session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_model).all()
            if not hasattr(field, 'options'):
                field.options = []
@@ -197,7 +301,7 @@ class WorkflowService(object):
            user_uid=g.user.uid,
            workflow_id=workflow_model.id,
            workflow_spec_id=workflow_model.workflow_spec_id,
            spec_version=workflow_model.spec_version,
            spec_version=processor.get_version_string(),
            action=action,
            task_id=task.id,
            task_name=task.name,

@@ -212,3 +316,4 @@ class WorkflowService(object):
        )
        db.session.add(task_event)
        db.session.commit()

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" id="Definitions_0be39yr" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" id="Definitions_0be39yr" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
  <bpmn:process id="Process_1cme33c" isExecutable="false">
    <bpmn:parallelGateway id="ParallelGateway_0ecwf3g">
      <bpmn:incoming>Flow_1wqp7vf</bpmn:incoming>

@@ -36,10 +36,9 @@
        </camunda:formField>
        <camunda:formField id="ProtocolOwnerName" label="Protocol Owner Name" type="autocomplete">
          <camunda:properties>
            <camunda:property id="enum.options.file" value="SponsorList.xls" />
            <camunda:property id="enum.options.value.column" value="CUSTOMER_NUMBER" />
            <camunda:property id="enum.options.label.column" value="CUSTOMER_NAME" />
            <camunda:property id="enum.options.lookup" value="True" />
            <camunda:property id="spreadsheet.name" value="SponsorList.xls" />
            <camunda:property id="spreadsheet.value.column" value="CUSTOMER_NUMBER" />
            <camunda:property id="spreadsheet.label.column" value="CUSTOMER_NAME" />
            <camunda:property id="help" value="#### How To:\nYou can find the name by typing any part (at least 3 characters) of the name.\n\nNote: This source of this list is in the Integration System (Oracle) and the information is owned by and managed by the OSP team.\n\nIf you are not finding the name or need to make any changes.\n1. Email 'Information Team listserve' osp-infoteam@virginia.edu with the Subject Line &quot;Requesting New Sponsor Setup&quot; and provide the following information:\n - Sponsor Legal Name, Address, Sponsor Classification (Federal Government, Foreign Entity, Foundation, Industry, Local Government, Other Colleges &amp; Universities or State Government) as stated in the agreement/notification.\n - Copies of the agreement from the sponsor (contract, award letter, email, etc.).\n2. Once all the required information is received, OSP will add the name to the list.\n3. The updated list should be available for your selection in the workflow within 2 business days." />
            <camunda:property id="description" value="The protocol owner name is always an entity. For example, if this is a UVA Primary Investigator - Investigator initiated study, the Protocol Owner Name will be &quot;University of Virginia&quot;" />
          </camunda:properties>
@@ -235,104 +234,104 @@
  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_1cme33c">
      <bpmndi:BPMNEdge id="Flow_1wqp7vf_di" bpmnElement="Flow_1wqp7vf">
        <di:waypoint x="450" y="325" />
        <di:waypoint x="495" y="325" />
        <di:waypoint x="820" y="325" />
        <di:waypoint x="865" y="325" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_1tfyk5m_di" bpmnElement="Flow_1tfyk5m">
        <di:waypoint x="300" y="325" />
        <di:waypoint x="350" y="325" />
        <di:waypoint x="670" y="325" />
        <di:waypoint x="720" y="325" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_1d4dncx_di" bpmnElement="Flow_1d4dncx">
        <di:waypoint x="520" y="300" />
        <di:waypoint x="520" y="250" />
        <di:waypoint x="620" y="250" />
        <di:waypoint x="890" y="300" />
        <di:waypoint x="890" y="250" />
        <di:waypoint x="990" y="250" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_16v64sg_di" bpmnElement="Flow_16v64sg">
        <di:waypoint x="140" y="325" />
        <di:waypoint x="200" y="325" />
        <di:waypoint x="510" y="325" />
        <di:waypoint x="570" y="325" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_09h1imz_di" bpmnElement="Flow_09h1imz">
        <di:waypoint x="-20" y="325" />
        <di:waypoint x="40" y="325" />
        <di:waypoint x="350" y="325" />
        <di:waypoint x="410" y="325" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_1v7oplk_di" bpmnElement="SequenceFlow_1v7oplk">
        <di:waypoint x="845" y="325" />
        <di:waypoint x="898" y="325" />
        <di:waypoint x="1215" y="325" />
        <di:waypoint x="1268" y="325" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_0rw17h2_di" bpmnElement="SequenceFlow_0rw17h2">
        <di:waypoint x="720" y="500" />
        <di:waypoint x="820" y="500" />
        <di:waypoint x="820" y="350" />
        <di:waypoint x="1090" y="500" />
        <di:waypoint x="1190" y="500" />
        <di:waypoint x="1190" y="350" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_0gsy7mo_di" bpmnElement="SequenceFlow_0gsy7mo">
        <di:waypoint x="720" y="380" />
        <di:waypoint x="820" y="380" />
        <di:waypoint x="820" y="350" />
        <di:waypoint x="1090" y="380" />
        <di:waypoint x="1190" y="380" />
        <di:waypoint x="1190" y="350" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_1o39rt4_di" bpmnElement="SequenceFlow_1o39rt4">
        <di:waypoint x="720" y="250" />
        <di:waypoint x="820" y="250" />
        <di:waypoint x="820" y="300" />
        <di:waypoint x="1090" y="250" />
        <di:waypoint x="1190" y="250" />
        <di:waypoint x="1190" y="300" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_02nbqkn_di" bpmnElement="SequenceFlow_02nbqkn">
        <di:waypoint x="720" y="130" />
        <di:waypoint x="820" y="130" />
        <di:waypoint x="820" y="300" />
        <di:waypoint x="1090" y="130" />
        <di:waypoint x="1190" y="130" />
        <di:waypoint x="1190" y="300" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_0xj8i4c_di" bpmnElement="SequenceFlow_0xj8i4c">
        <di:waypoint x="520" y="350" />
        <di:waypoint x="520" y="500" />
        <di:waypoint x="620" y="500" />
        <di:waypoint x="890" y="350" />
        <di:waypoint x="890" y="500" />
        <di:waypoint x="990" y="500" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_1idbomg_di" bpmnElement="SequenceFlow_1idbomg">
        <di:waypoint x="520" y="350" />
        <di:waypoint x="520" y="380" />
        <di:waypoint x="620" y="380" />
        <di:waypoint x="890" y="350" />
        <di:waypoint x="890" y="380" />
        <di:waypoint x="990" y="380" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_0f61fxp_di" bpmnElement="SequenceFlow_0f61fxp">
        <di:waypoint x="520" y="300" />
        <di:waypoint x="520" y="130" />
        <di:waypoint x="620" y="130" />
        <di:waypoint x="890" y="300" />
        <di:waypoint x="890" y="130" />
        <di:waypoint x="990" y="130" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_1r3yrhy_di" bpmnElement="SequenceFlow_1r3yrhy">
        <di:waypoint x="-182" y="325" />
        <di:waypoint x="-120" y="325" />
        <di:waypoint x="188" y="325" />
        <di:waypoint x="250" y="325" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="ParallelGateway_0ecwf3g_di" bpmnElement="ParallelGateway_0ecwf3g">
        <dc:Bounds x="495" y="300" width="50" height="50" />
        <dc:Bounds x="865" y="300" width="50" height="50" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="ParallelGateway_01234ff_di" bpmnElement="ParallelGateway_01234ff">
        <dc:Bounds x="795" y="300" width="50" height="50" />
        <dc:Bounds x="1165" y="300" width="50" height="50" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="EndEvent_16uwhzg_di" bpmnElement="EndEvent_16uwhzg">
        <dc:Bounds x="898" y="307" width="36" height="36" />
        <dc:Bounds x="1268" y="307" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="StartEvent_1mhzkcr_di" bpmnElement="StartEvent_1mhzkcr">
        <dc:Bounds x="-218" y="307" width="36" height="36" />
        <dc:Bounds x="152" y="307" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="UserTask_1y1qon7_di" bpmnElement="UserTask_1y1qon7">
        <dc:Bounds x="620" y="340" width="100" height="80" />
        <dc:Bounds x="990" y="340" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="UserTask_01zzzg9_di" bpmnElement="UserTask_01zzzg9">
        <dc:Bounds x="620" y="460" width="100" height="80" />
        <dc:Bounds x="990" y="460" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="UserTask_0gtuk1e_di" bpmnElement="UserTask_EnterMultiSiteInfo">
        <dc:Bounds x="620" y="210" width="100" height="80" />
        <dc:Bounds x="990" y="210" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="UserTask_0ebxkp7_di" bpmnElement="UserTask_0ebxkp7">
        <dc:Bounds x="620" y="90" width="100" height="80" />
        <dc:Bounds x="990" y="90" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_0vthni9_di" bpmnElement="Activity_10nxpt2">
        <dc:Bounds x="-120" y="285" width="100" height="80" />
        <dc:Bounds x="250" y="285" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_0spxv8q_di" bpmnElement="Activity_PBMultiSiteCheckQ12">
        <dc:Bounds x="40" y="285" width="100" height="80" />
        <dc:Bounds x="410" y="285" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_0ah6heg_di" bpmnElement="Activity_PBMultiSiteCheckQ14">
        <dc:Bounds x="200" y="285" width="100" height="80" />
        <dc:Bounds x="570" y="285" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_0x7b58m_di" bpmnElement="Activity_PBMultiSiteCheckQ28">
        <dc:Bounds x="350" y="285" width="100" height="80" />
        <dc:Bounds x="720" y="285" width="100" height="80" />
      </bpmndi:BPMNShape>
    </bpmndi:BPMNPlane>
  </bpmndi:BPMNDiagram>
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,357 @@
<?xml version="1.0" encoding="UTF-8"?>
<definitions xmlns="http://www.omg.org/spec/DMN/20151101/dmn.xsd" id="Definitions_0crc2o7" name="DRD" namespace="http://camunda.org/schema/1.0/dmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
  <decision id="Decision_ApprovalInfo" name="Approval Info">
    <decisionTable id="decisionTable_1">
      <input id="input_1" label="School">
        <inputExpression id="inputExpression_1" typeRef="string">
          <text>PISchool</text>
        </inputExpression>
      </input>
      <output id="OutputClause_1138fqx" label="School" name="ApprvlSchool" typeRef="string" />
      <output id="output_1" label="Approver 1" name="ApprvlApprvr1" typeRef="string" />
      <output id="OutputClause_0d10p7n" label="Approver 1 Name" name="ApprvlApprvrName1" typeRef="string" />
      <output id="OutputClause_0dcb1cr" label="Approver 1 Role" name="ApprvlApprovrRole1" typeRef="string" />
      <output id="OutputClause_0mftsw9" label="Approver 2" name="ApprvlApprvr2" typeRef="string" />
      <output id="OutputClause_0orlpz9" label="Approver 2 Name" name="ApprvlApprvrName2" typeRef="string" />
      <output id="OutputClause_0iuw224" label="Approver 2 Role" name="ApprvlApprvrRole2" typeRef="string" />
      <rule id="DecisionRule_1wge2nn">
        <inputEntry id="UnaryTests_0mfg1fu">
          <text>"Architecture"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_074qb28">
          <text>"Architecture"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0vb9mia">
          <text>PISupervisor.data.uid</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_00k4by2">
          <text>PISupervisor.data.display_name</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1pqm0u2">
          <text>"Supervisor"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_17oc2op">
          <text>"agc9a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1b0mjy0">
          <text>"Anselmo Canfora"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_08dc6cz">
          <text>"Associate Research Dean"</text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1u9orsf">
        <inputEntry id="UnaryTests_08ormp3">
          <text>"Arts &amp; Sciences"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_0ua8d03">
          <text>"Arts &amp; Sciences"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1ejs3h2">
          <text>PISupervisor.data.uid</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1v7dfzn">
          <text>PISupervisor.data.display_name</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0h7eq4l">
          <text>"Supervisor"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_06pynb2">
          <text>"dh2t"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1b047rf">
          <text>"David Hill"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0x965p2">
          <text>"Associate Research Dean"</text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_0k7lbs6">
        <inputEntry id="UnaryTests_1tws5wb">
          <text>"Commerce"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_0y7bv69">
          <text>"Commerce"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0sz476a">
          <text>"dcs8f"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_11bvybc">
          <text>"David Smith"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0wbcswk">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0htwmws">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0do7f3z">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0ntf026">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_0pcsd6s">
        <inputEntry id="UnaryTests_07uijrb">
          <text>"Darden"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1he7hp6">
          <text>"Darden"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0hdskgi">
          <text>"mw4m"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1urv25e">
          <text>"Maureen Wellen"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1uxxdv0">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0q81xjf">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0qypltu">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1rp7s1w">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_0qlavkb">
        <inputEntry id="UnaryTests_1775ht5">
          <text>"Data Science"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1fxauop">
          <text>"Data Science"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0w0ksry">
          <text>"cws3v"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1mnndve">
          <text>"Claudia Scholz"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0847zci">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1xjonk2">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0g81mjb">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1rb6200">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_0vw6027">
        <inputEntry id="UnaryTests_1hsal1b">
          <text>"Education"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1uwd4hj">
          <text>"Education"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1t3c4oj">
          <text>PISupervisor.data.uid</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_045f3hz">
          <text>PISupervisor.data.display_name</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0vkosy7">
          <text>"Supervisor"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1sj3i3e">
          <text>"cpb8g"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1f1zg5p">
          <text>"Catherine Bradshaw"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_11ml53c">
          <text>"Associate Research Dean"</text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1rgdbw3">
        <inputEntry id="UnaryTests_022jcbh">
          <text>"Engineering"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_0lzmt29">
          <text>"Engineering"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0v4df1x">
          <text>"sb5mc"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1biq8m6">
          <text>"Susan Barker"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_16kmec1">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1bjsn8g">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1ycmwd4">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1rm1jw7">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1hy1sby">
        <inputEntry id="UnaryTests_1u52cey">
          <text>"Law"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1w3vu2k">
          <text>"Law"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1nrqe2j">
          <text>"kendrick"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_02ckxvj">
          <text>"Leslie Kendrick"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_19n45lv">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0nppbew">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_08qy2v8">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0n1f5l1">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1nmyhfi">
        <inputEntry id="UnaryTests_0uqd08s">
          <text>"Leadership &amp; Public Policy"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_0dy02ei">
          <text>"Leadership &amp; Public Policy"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0rxrqkb">
          <text>"jps3va"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_05wkbco">
          <text>"Jay Shimshack"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0ejmi7q">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1ph2wlx">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1q9esl3">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0i6fih4">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_17uxicj">
        <inputEntry id="UnaryTests_0kttpy1">
          <text>"Medicine"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1ops4nm">
          <text>"Medicine"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1cwa3r0">
          <text>PISupervisor.data.uid</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1y0r1vs">
          <text>PISupervisor.data.display_name</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0ouvo17">
          <text>"Supervisor"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_18rte97">
          <text>"mas3x"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0j7fp1z">
          <text>"Margaret Shupnik"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1rxh2p0">
          <text>"Associate Research Dean"</text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1t4241f">
        <inputEntry id="UnaryTests_0xwnrta">
          <text>"Nursing"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1va63rh">
          <text>"Nursing"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_125fuie">
          <text>"jla7e"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1p94oot">
          <text>"Jeanne Alhusen"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0h1e3bv">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0e8b9ui">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0xky8fm">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1mq49yg">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_0jnp1wp">
        <inputEntry id="UnaryTests_164zt99">
          <text>"Continuing Education"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1h81xwr">
          <text>"Continuing Education"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_12sudp0">
          <text>"ado4v"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0j7x2td">
          <text>"Angela Orebaugh"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1jkrv7z">
          <text>"Associate Research Dean"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0qelytu">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1tffqsf">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_19tc8kf">
          <text></text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1ynkili">
        <inputEntry id="UnaryTests_1dupb6e">
          <text>"Provost Office"</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1w13wuw">
          <text>"Provost Office"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_13xizrn">
          <text>"rammk"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0ixvqzn">
          <text>"Melur Ramasubramanian"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_166hajt">
          <text>"VP of Research"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_1xazzzn">
          <text></text>
        </outputEntry>
        <outputEntry id="LiteralExpression_14uq3iz">
          <text>"n/a"</text>
        </outputEntry>
        <outputEntry id="LiteralExpression_0zr3ar6">
          <text></text>
        </outputEntry>
      </rule>
    </decisionTable>
  </decision>
</definitions>
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,907 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" id="Definitions_1oogn9j" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
|
||||
<bpmn:process id="Process_0ssahs9" isExecutable="true">
|
||||
<bpmn:startEvent id="StartEvent_1">
|
||||
<bpmn:outgoing>SequenceFlow_05ja25w</bpmn:outgoing>
|
||||
</bpmn:startEvent>
|
||||
<bpmn:manualTask id="ManualTask_Instructions" name="Read RRP Instructions">
|
||||
<bpmn:documentation>## **Beta Stage: All data entered will be destroyed before public launch**
|
||||
|
||||
|
||||
### UNIVERSITY OF VIRGINIA RESEARCH
|
||||
[From Research Ramp-up Guidance](https://research.virginia.edu/research-ramp-guidance)
|
||||
|
||||
|
||||
#### Support
|
||||
Report problems and/or submit questions to: askresearch@virginia.edu
|
||||
|
||||
#### Research Guidance
|
||||
|
||||
Our general principle is that only research activities requiring on-Grounds presence would be conducted on-Grounds. All other research-related work would continue to be performed by telework until restrictions are lifted. Separate school, department and building specific plans should supplement these guidelines.
|
||||
|
||||
|
||||
For research that needs to be on Grounds, the plan is to ramp up in phases with emphasis on safety. The goal of this document is to provide a central framework for resuming activities while allowing for coordinated school specific implementation strategies.
|
||||
|
||||
|
||||
The success of the ramp up depends on each researcher placing the safety of themselves and the people around them first, while conducting their research. In order to reduce our risks as much as possible, this must be a partnership between the researchers and the administration.
|
||||
|
||||
|
||||
Schools are developing a process for the approval of ramp-up requests and enforcement of the safety guidelines described in this document. The VPR office is working with the schools to provide the necessary support for business process infrastructure, and working with the COO’s office to coordinate the acquisition of necessary supplies, including face coverings and sanitizing supplies.
|
||||
|
||||
**Instructions for Submitting:**
|
||||
|
||||
|
||||
1. The Research Ramp-up Plan allows for one request to be entered for a single Principal Investigator. In the form that follows, enter the Primary Investigator this request is for, along with other identifying information. The PI's School and Supervisor will be used as needed for approval routing.
|
||||
2. Enter all available information in the forms that follow to provide an overview of where the research will resume, who will be involved, what supporting resources will be needed, and what steps have been taken to assure compliance with [Research Ramp-up Guidance](https://research.virginia.edu/research-ramp-guidance).
|
||||
3. After all forms have been completed, you will be presented with the option to create your Research Recovery Plan in Word format. Download the document and review it. If you see any corrections that need to be made, return to the corresponding form and make the correction.
|
||||
4. Once the generated Research Recovery Plan is finalized, proceed to the Plan Submission step to submit your plan for approval.</bpmn:documentation>
|
||||
<bpmn:incoming>SequenceFlow_05ja25w</bpmn:incoming>
|
||||
<bpmn:outgoing>SequenceFlow_0h50bp3</bpmn:outgoing>
|
||||
</bpmn:manualTask>
|
||||
<bpmn:userTask id="Activity-PI_Info" name="Enter PI Info" camunda:formKey="PI Information">
|
||||
<bpmn:documentation>#### Primary Investigator Information
|
||||
Enter the following information for the PI submitting this request.</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="PIComputingID" label="Primary Investigator" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="placeholder" value="wxy0z or Smith" />
|
||||
<camunda:property id="ldap.lookup" value="true" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
<camunda:constraint name="description" config="Find the PI by entering Computing ID or Last Name." />
|
||||
<camunda:constraint name="ldap.lookup" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
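<!-- Editor's note: fields of type "autocomplete" with ldap.lookup=true presumably resolve the entered Computing ID or last name against the university LDAP directory; this is inferred from the property names and is not documented in this file. -->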
|
||||
<camunda:formField id="PISchool" label="PI's School" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="SchoolList.xls" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="School Name" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PIPrimaryDeptArchitecture" label="PI's Primary Architecture Department" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="DepartmentList-Architecture.xlsx" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Label" />
|
||||
<camunda:property id="hide_expression" value="(model.PISchool && model.PISchool !== "Architecture") || model.PISchool === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
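<!-- Editor's note on the pattern used for each school-specific department dropdown below: hide_expression appears to be a JavaScript-style boolean expression over the form model, hiding the field unless PISchool matches the school named in the expression (assumption based on the syntax; evaluation semantics are defined by the host application). -->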
|
||||
<camunda:formField id="PIPrimaryDeptArtsSciences" label="PI's Primary Arts & Sciences Department" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="DepartmentList-ArtsSciences.xlsx" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Label" />
|
||||
<camunda:property id="hide_expression" value="(model.PISchool && model.PISchool !== "Arts & Sciences") || model.PISchool === null" />
|
||||
<camunda:property id="description" value="Type key words to find department" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PIPrimaryDeptEducation" label="PI's Primary Education Department" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="DepartmentList-Education.xlsx" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Label" />
|
||||
<camunda:property id="hide_expression" value="(model.PISchool && model.PISchool !== "Education") || model.PISchool === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PIPrimaryDeptEngineering" label="PI's Primary Engineering Department" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="DepartmentList-Engineering.xlsx" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Label" />
|
||||
<camunda:property id="hide_expression" value="(model.PISchool && model.PISchool !== "Engineering") || model.PISchool === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PIPrimaryDeptMedicine" label="PI's Primary Medicine Department/Center" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="DepartmentList-Medicine.xlsx" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Label" />
|
||||
<camunda:property id="hide_expression" value="(model.PISchool && model.PISchool !== "Medicine") || model.PISchool === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PIPrimaryDeptProvostOffice" label="PI's Primary Provost Office Department/Center" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="spreadsheet.name" value="DepartmentList-ProvostOffice.xlsx" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Label" />
|
||||
<camunda:property id="hide_expression" value="(model.PISchool && model.PISchool !== "Provost Office") || model.PISchool === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PIPrimaryDeptOther" label="Primary Department " type="string">
|
||||
<camunda:properties>
|
||||
<camunda:property id="hide_expression" value="(model.PIPrimaryDeptArchitecture === null && model.PIPrimaryDeptArtsSciences === null && model.PIPrimaryDeptEducation === null && model.PIPrimaryDeptEngineering === null && model.PIPrimaryDeptMedicine === null && model.PIPrimaryDeptProvostOffice === null) || (model.PIPrimaryDeptArchitecture !== "Other" && model.PIPrimaryDeptArtsSciences !== "Other" && model.PIPrimaryDeptEducation !== "Other" && model.PIPrimaryDeptEngineering !== "Other" && model.PIPrimaryDeptMedicine !== "Other" && model.PIPrimaryDeptProvostOffice !== "Other")" />
|
||||
<camunda:property id="description" value="Enter the PI's Primary Department " />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
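<!-- Editor's note: the "Primary Department" free-text field above is shown only when one of the school-specific department dropdowns is set to "Other" (per its hide_expression). -->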
|
||||
<camunda:formField id="PISupervisor" label="Pi's Supervisor" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Find the PI's Supervisor by entering Computing I D or Last Name." />
|
||||
<camunda:property id="ldap.lookup" value="true" />
|
||||
<camunda:property id="placeholder" value="wxy0z or Smith" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="Required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>SequenceFlow_0h50bp3</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_16y8glw</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_0h50bp3" sourceRef="ManualTask_Instructions" targetRef="Activity-PI_Info" />
|
||||
<bpmn:sequenceFlow id="SequenceFlow_05ja25w" sourceRef="StartEvent_1" targetRef="ManualTask_Instructions" />
|
||||
<bpmn:userTask id="Personnel" name="Enter Personnel" camunda:formKey="Personnel">
|
||||
<bpmn:documentation>#### People for whom you are requesting access
|
||||
Provide information on all researchers for whom you are requesting approval to re-enter the previously entered lab/research and/or office space(s) to conduct research on-Grounds. (If personnel are already working in the space, include them.)
|
||||
|
||||
**Note: no undergraduates will be allowed to work on-Grounds during Phase I.**
|
||||
|
||||
#### Exclusive Space previously entered
|
||||
{% for es in exclusive %}
|
||||
{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label }}
|
||||
{% else %}
|
||||
No exclusive space entered
|
||||
{% endfor %}
|
||||
|
||||
|
||||
#### Shared Space previously entered
|
||||
{% for ss in shared %}
|
||||
{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label }}
|
||||
{% else %}
|
||||
No shared space entered
|
||||
{% endfor %}</bpmn:documentation>
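<!-- Editor's note: the documentation above uses Jinja-style templating ({% for %} loops over the "exclusive" and "shared" repeat groups) to echo previously entered spaces back to the user; rendering is presumably performed by the host application. -->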
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="PersonnelComputingID" label="Computer ID" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Personnel" />
|
||||
<camunda:property id="ldap.lookup" value="true" />
|
||||
<camunda:property id="description" value="Find by entering Computing ID or Last Name." />
|
||||
<camunda:property id="placeholder" value="wxy0z or Smith" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PersonnelType" label="Personnel Type" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Personnel" />
|
||||
</camunda:properties>
|
||||
<camunda:value id="Faculty" name="Faculty" />
|
||||
<camunda:value id="AcademicResearcher" name="Academic Researcher" />
|
||||
<camunda:value id="Staff" name="Staff" />
|
||||
<camunda:value id="Postdoc" name="Postdoc" />
|
||||
<camunda:value id="DoctoralStudent" name="Doctoral Student" />
|
||||
<camunda:value id="UndeGraduate" name="Undergraduate" />
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PersonnelSpace" label="Space they will work in" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="rows" value="2" />
|
||||
<camunda:property id="autosize" value="true" />
|
||||
<camunda:property id="description" value="Provide building and room number for each lab space this person will work in" />
|
||||
<camunda:property id="repeat" value="Personnel" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PersonnelJustification" label="Research Justification" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Provide a brief description of this person’s research and justification why this is critical research." />
|
||||
<camunda:property id="rows" value="3" />
|
||||
<camunda:property id="autosize" value="true" />
|
||||
<camunda:property id="repeat" value="Personnel" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PersonnelWeeklySchedule" label="Personnel Weekly Schedule" type="files">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Upload a file or files of proposed weekly schedules for all personnel which are representative of compliance with ramp-up guidance." />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_1eiud85</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_1nbjr72</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:userTask id="UserTask_CoreResource" name="Enter Core Resources" camunda:formKey="Core Resources">
|
||||
<bpmn:documentation>#### If applicable, list any [Core Resources](https://research.virginia.edu/research-core-resources) whose space or instruments you will utilize, along with the name/email of the contact person in the core with whom you have coordinated your plan. (Core facility managers are responsible for developing a plan for their space.)</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="isCoreResourcesUse" label="Core Resources Use" type="boolean">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Will you need access to core resources to resume research?" />
|
||||
<camunda:property id="help" value="[From Research Ramp-up Guidance](https://research.virginia.edu/research-ramp-guidance)\n#### ADDITIONAL CONSIDERATIONS FOR A SUCCESSFUL RAMP-UP:\n##### Lab-based Research\n- (1) Core facilities operational\n\n[EHS Lab Ramp up Checklist for Laboratories](https://research.virginia.edu/sites/vpr/files/2020-05/EHS.LabRampUpChecklistForLaboratories_0_0.pdf)\n##### General\n- Note that shared facilities, such as stockrooms or core labs, may be on different ramp up schedules or in more demand than during normal operation." />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="CoreResources" label="Core Resources" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="rows" value="10" />
|
||||
<camunda:property id="autosize" value="true" />
|
||||
<camunda:property id="hide_expression" value="!model.isCoreResourcesUse | model.isCoreResourcesUse == null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
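<!-- Editor's note: the textarea above is shown only after "Core Resources Use" is answered Yes; the hide_expression treats an unanswered (null) value the same as No. -->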
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_15zy1q7</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_12ie6w0</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:endEvent id="EndEvent_09wp7av">
|
||||
<bpmn:documentation>#### End of Workflow
|
||||
Place instructions here.</bpmn:documentation>
|
||||
<bpmn:incoming>Flow_05w8yd6</bpmn:incoming>
|
||||
</bpmn:endEvent>
|
||||
<bpmn:sequenceFlow id="Flow_1e2qi9s" sourceRef="Activity_AcknowledgePlanReview" targetRef="Activity_ApprovalInfo" />
|
||||
<bpmn:manualTask id="Activity_AcknowledgePlanReview" name="Acknowledge Plan Review">
|
||||
<bpmn:documentation>Your Research Ramp-up Plan has been generated and is available in the Files pop-out found in the upper right hand corner of this application Click on the file name link to download the MS Word file, open and review. If changes are needed, choose the appropriate menu choice to make your edits, clicking Save when done. Note that you will need to revisit subsequent steps so the application can check to see if your edits impacted future workflow decisions. All your data will be persevered and you will need to click the Save button on each step to proceed.
|
||||
|
||||
When your Research Ramp-up Plan is complete and ready to submit for review and approval, click the Continue button below.</bpmn:documentation>
|
||||
<bpmn:incoming>Flow_0aqgwvu</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_1e2qi9s</bpmn:outgoing>
|
||||
</bpmn:manualTask>
|
||||
<bpmn:userTask id="Activity_SharedSpaceInfo" name="Enter Shared Space" camunda:formKey="Space Involved in this Request">
|
||||
<bpmn:documentation>#### Space used by {{ PIComputingID.label }} and shared with other PIs. If all space is exclusive and not shared with one or more other investigators, click Save to skip this section and proceed to the next section.</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="SharedSpaceBuilding" label="Building Name" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Select the building where this shared lab space is housed." />
|
||||
<camunda:property id="spreadsheet.name" value="BuildingList.xls" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Building Name" />
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SharedSpaceRoomID" label="Shared Space Room ID" type="string">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
<camunda:property id="description" value="Enter the room number or other unique identifier" />
|
||||
<camunda:property id="hide_expression" value="model.SharedSpaceBuilding === "Other"" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ShareSpaceRoomIDBuilding" label="Room No, and Building Name" type="string">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Enter the Room No and Building of your shared space." />
|
||||
<camunda:property id="hide_expression" value="model.SharedSpaceBuilding !== "Other" | model.SharedSpaceBuilding === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SharedSpaceAMComputingID" label="Area Monitor" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="ldap.lookup" value="true" />
|
||||
<camunda:property id="placeholder" value="wxy0z or Smith" />
|
||||
<camunda:property id="description" value="Enter Area Monitor's Computing ID or last name and select Area Monitor. Leave blank if not known." />
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SharedSpaceSqFt" label="Space square Feet" type="long">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
<camunda:property id="description" value="Enter the number of square feet in this space." />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SharedSpacePercentUsable" label="Percent of Space Usable By Personnel" type="long">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
<camunda:property id="description" value="If known, enter a number between 1 & 100 which indicates the approximate percent of the total space personnel will work in and move around in during the workday." />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="min" config="1" />
|
||||
<camunda:constraint name="max" config="100" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SharedSpaceMaxPersonnel" label="Maximum Number of Personnel Occupying Space" type="long">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Enter the maximum number of personnel, including yourself, who will occupy this space." />
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SharedSpacePI" label="Shared Space PI(s)" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="For each PI you share this space with, enter their Name, School, Department and Email Address." />
|
||||
<camunda:property id="rows" value="5" />
|
||||
<camunda:property id="repeat" value="Shared" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_19xeq76</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_16342pm</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:parallelGateway id="Gateway_0frfdnc">
|
||||
<bpmn:incoming>Flow_1v7r1tg</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_19xeq76</bpmn:outgoing>
|
||||
<bpmn:outgoing>Flow_0qf2y84</bpmn:outgoing>
|
||||
<bpmn:outgoing>Flow_15zy1q7</bpmn:outgoing>
|
||||
<bpmn:outgoing>Flow_0ya8hw8</bpmn:outgoing>
|
||||
</bpmn:parallelGateway>
|
||||
<bpmn:parallelGateway id="Gateway_1vj4zd3">
|
||||
<bpmn:incoming>Flow_0tk64b6</bpmn:incoming>
|
||||
<bpmn:incoming>Flow_12ie6w0</bpmn:incoming>
|
||||
<bpmn:incoming>Flow_0zz2hbq</bpmn:incoming>
|
||||
<bpmn:incoming>Flow_16342pm</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_1eiud85</bpmn:outgoing>
|
||||
</bpmn:parallelGateway>
|
||||
<bpmn:sequenceFlow id="Flow_19xeq76" sourceRef="Gateway_0frfdnc" targetRef="Activity_SharedSpaceInfo" />
|
||||
<bpmn:sequenceFlow id="Flow_16342pm" sourceRef="Activity_SharedSpaceInfo" targetRef="Gateway_1vj4zd3" />
|
||||
<bpmn:sequenceFlow id="Flow_16y8glw" sourceRef="Activity-PI_Info" targetRef="Activity_1u58hox" />
|
||||
<bpmn:sequenceFlow id="Flow_0qf2y84" sourceRef="Gateway_0frfdnc" targetRef="Activity_ExclusiveSpace" />
|
||||
<bpmn:sequenceFlow id="Flow_0tk64b6" sourceRef="Activity_ExclusiveSpace" targetRef="Gateway_1vj4zd3" />
|
||||
<bpmn:userTask id="Activity_ExclusiveSpace" name="Enter Exclusive Space" camunda:formKey="ExclusiveSpace">
|
||||
<bpmn:documentation>#### Space managed exclusively by {{ PIComputingID.label }}
|
||||
Submit one entry for each space for which the PI is the exclusive investigator. If all space is shared with one or more other investigators, click Save to skip this section and proceed to the Shared Space section.</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="ExclusiveSpaceBuilding" label="Room No. & Building Name" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Type key word to find the building in which the lab is located." />
|
||||
<camunda:property id="spreadsheet.name" value="BuildingList.xls" />
|
||||
<camunda:property id="spreadsheet.value.column" value="Value" />
|
||||
<camunda:property id="spreadsheet.label.column" value="Building Name" />
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpaceRoomID" label="Exclusive Space Room ID" type="string">
|
||||
<camunda:properties>
|
||||
<camunda:property id="hide_expression" value="model.ExclusiveSpaceBuilding === "Other"" />
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
<camunda:property id="description" value="Enter the room number or other unique identifier" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpaceRoomIDBuilding" label="Room No, and Building Name" type="string">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Enter the Room No and Building of your exclusive space." />
|
||||
<camunda:property id="hide_expression" value="model.ExclusiveSpaceBuilding !== "Other" | model.ExclusiveSpaceBuilding === null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpaceType" label="Space Room Type" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
<camunda:property id="enum_type" value="radio" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
<camunda:value id="Lab" name="Lab" />
|
||||
<camunda:value id="Office" name="Office" />
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpaceAMComputingID" label="Area Monitor" type="autocomplete">
|
||||
<camunda:properties>
|
||||
<camunda:property id="ldap.lookup" value="true" />
|
||||
<camunda:property id="description" value="Enter Area Monitor's Computing ID or last name and select Area Monitor. Leave blank if not known." />
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
<camunda:property id="placeholder" value="wxy0z or Smith" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpaceSqFt" label="Space Square Feet" type="long">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
<camunda:property id="description" value="Enter the number of square feet in this space." />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpacePercentUsable" label="Percent of Space Usable By Personnel" type="long">
|
||||
<camunda:properties>
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
<camunda:property id="description" value="If known, enter a number between 1 & 100 which indicates the approximate percent of the total space personnel will work in and move around in during the workday." />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="min" config="1" />
|
||||
<camunda:constraint name="max" config="100" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="ExclusiveSpaceMaxPersonnel" label="Maximum Number of Personnel Occupying Space" type="long">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Enter the maximum number of personnel, including yourself, who will occupy this space." />
|
||||
<camunda:property id="repeat" value="Exclusive" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_0qf2y84</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0tk64b6</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="Flow_15zy1q7" sourceRef="Gateway_0frfdnc" targetRef="UserTask_CoreResource" />
|
||||
<bpmn:sequenceFlow id="Flow_12ie6w0" sourceRef="UserTask_CoreResource" targetRef="Gateway_1vj4zd3" />
|
||||
<bpmn:sequenceFlow id="Flow_0ya8hw8" sourceRef="Gateway_0frfdnc" targetRef="Activity_nonUVASpaces" />
|
||||
<bpmn:userTask id="Activity_nonUVASpaces" name="Enter non-UVA Spaces" camunda:formKey="nonUVA Spaces">
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="isNonUVASpaces" label="non-UVA Spaces?" type="boolean">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Does any of your research occur in non-UVA spaces? (For example, field stations or UVA leased buildings off Grounds) " />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="NonUVASpaces" label="Where?" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="description" value="Does any of your research occur in non-UVA spaces? (For example, field stations or UVA leased buildings off Grounds) " />
|
||||
<camunda:property id="hide_expression" value="!model.isNonUVASpaces | model.isNonUVASpaces == null" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_0ya8hw8</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0zz2hbq</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="Flow_0zz2hbq" sourceRef="Activity_nonUVASpaces" targetRef="Gateway_1vj4zd3" />
|
||||
<bpmn:sequenceFlow id="Flow_1eiud85" sourceRef="Gateway_1vj4zd3" targetRef="Personnel" />
|
||||
<bpmn:sequenceFlow id="Flow_1nbjr72" sourceRef="Personnel" targetRef="Gateway_18jn18b" />
|
||||
<bpmn:userTask id="Activity_DistanceReq" name="Enter Distancing Requirements" camunda:formKey="Distancing Requirements">
|
||||
<bpmn:documentation>#### Distancing requirements:
|
||||
Maintain social distancing by designing the space between people to be at least 9 feet during prolonged work; this will be accomplished by restricting the number of people in the lab to a density of ~250 sq. ft. per person in lab areas. When moving around, a minimum of 6 feet of social distancing is required. Ideally, only one person should work per lab bench, and no more than one person should work in the same bay at the same time.</bpmn:documentation>
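<!-- Worked example of the density rule above (an editor's sketch, not part of the official guidance): at ~250 sq. ft. per person, a 1,000 sq. ft. lab supports at most floor(1000 / 250) = 4 people during prolonged work. The two fields below collect the inputs for this check. -->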
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="CIDR_TotalSqFt" label="What is the total square footage of your lab?" type="long">
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="CIDR_MaxPersonnel" label="How many personnel will be using the lab at any one time, at a maximum?" type="long">
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_0p2r1bo</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0tz5c2v</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="Flow_0p2r1bo" sourceRef="Gateway_18jn18b" targetRef="Activity_DistanceReq" />
|
||||
<bpmn:parallelGateway id="Gateway_18jn18b">
|
||||
<bpmn:incoming>Flow_1nbjr72</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0p2r1bo</bpmn:outgoing>
|
||||
<bpmn:outgoing>Flow_0mkh1wn</bpmn:outgoing>
|
||||
<bpmn:outgoing>Flow_1yqkpgu</bpmn:outgoing>
|
||||
<bpmn:outgoing>Flow_1c6m5wv</bpmn:outgoing>
|
||||
</bpmn:parallelGateway>
|
||||
<bpmn:sequenceFlow id="Flow_0mkh1wn" sourceRef="Gateway_18jn18b" targetRef="Activity_PWA" />
|
||||
<bpmn:userTask id="Activity_PWA" name="Enter Physical Work Arrangements" camunda:formKey="Physical Work Arrangements">
|
||||
<bpmn:documentation>Describe the physical work arrangements for each lab. Show a schematic of the lab and its space organization that meets the distancing guidelines (see key safety expectations for ramp-up).
|
||||
- Show gross dimensions, location of desks, and equipment in blocks (not details) that show available space for work and foot traffic.
|
||||
- Indicate the total square footage for every lab/space to which you are requesting to add personnel in this application. If you would like help obtaining a floor plan for your lab, your department or dean's office can help. You can also create a hand drawing/block diagram of your space and the location of objects on graph paper.
|
||||
- Upload your physical layout and workspace organization in the form of a jpg image or a pdf file. These can be hand-drawn or actual floor plans.
|
||||
- Show and/or describe the designated work location for each member (during their shift) in the lab when multiple members are present at a time, to meet the distancing guidelines.
|
||||
- Provide a foot traffic plan (on the schematic) to indicate how people can move around while maintaining distancing requirements. This can be a freeform sketch on your floor plan showing where foot traffic can occur in your lab and the conditions, if any, to ensure distancing at all times (e.g., the direction to walk around a lab bench, rules for using shared equipment located in the lab, certain areas of the lab being off-limits, etc.).
|
||||
- Provide your initial weekly laboratory schedule (see Excel template) for all members you are requesting access for, indicating all shifts as necessary. If the schedule changes, please submit your revised schedule through the web portal.</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="PWADescribe" label="Describe" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="rows" value="10" />
|
||||
<camunda:property id="autosize" value="true" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="PWAFiles" label="Upload supporting files" type="files" />
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_0mkh1wn</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0zrsh65</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="Flow_1yqkpgu" sourceRef="Gateway_18jn18b" targetRef="Activity_HSR" />
|
||||
<bpmn:userTask id="Activity_HSR" name="Enter Health Safety Requirements" camunda:formKey="Lab Plan">
|
||||
<bpmn:documentation>#### Health Safety Requirements:
|
||||
Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/url?q=http://ehs.virginia.edu/files/Lab-Safety-Plan-During-COVID-19.docx&source=gmail&ust=1590687968958000&usg=AFQjCNE83uGDFtxGkKaxjuXGhTocu-FDmw) to create and upload a copy of your laboratory policy statement for all members, which includes at a minimum the following details:
|
||||
- Laboratory face covering rules and use of other PPE as required
|
||||
- Adherence to individual schedules and check-in/check-out requirements
|
||||
- Completion of online EHS safety training requirement
|
||||
- Health self-attestation requirement
|
||||
- Sanitizing procedures including frequency and type of disinfectants to use
|
||||
- Where and how to obtain PPE including face covering</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="LabPlan" label="Upload Lab Plan" type="files" />
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_1yqkpgu</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_1ox5nv6</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="Flow_1c6m5wv" sourceRef="Gateway_18jn18b" targetRef="Activity_OtherReq" />
|
||||
<bpmn:userTask id="Activity_OtherReq" name="Enter Other Requirements" camunda:formKey="Other Requirements">
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="isAnimalResearch" label="Will you be using animals in your research? " type="boolean">
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="isAnimalResearchCoordinated" label="Have you coordinated with the vivarium manager to meet your needs?" type="boolean">
|
||||
<camunda:properties>
|
||||
<camunda:property id="hide_expression" value="!model.isAnimalResearch | model.isAnimalResearch == null" />
|
||||
</camunda:properties>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="isHumanSubjects" label="Does your research involve human subjects? " type="boolean">
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="IRBApprovalRelevantNumbers" label="List IRB Approval Relevant Numbers" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="rows" value="5" />
|
||||
<camunda:property id="hide_expression" value="!model.isHumanSubjects | model.isHumanSubjects == null" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="isNecessarySupplies" label="Do you have the necessary materials, supplies, cleaning supplies and PPE for your laboratory?" type="boolean">
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
<camunda:formField id="SupplyList" label="List your needs" type="textarea">
|
||||
<camunda:properties>
|
||||
<camunda:property id="hide_expression" value="model.isNecessarySupplies | model.isNecessarySupplies == null" />
|
||||
<camunda:property id="rows" value="5" />
|
||||
</camunda:properties>
|
||||
<camunda:validation>
|
||||
<camunda:constraint name="required" config="true" />
|
||||
</camunda:validation>
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_1c6m5wv</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0qbi47d</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:manualTask id="Activity_SubmitPlan" name="Acknowledge Plan Submission">
|
||||
<bpmn:documentation>#### By submitting this request, you understand that every member listed in this form for on-Grounds laboratory access will:
|
||||
- Complete the online COVID awareness & precaution training module (link forthcoming May 25)
|
||||
- Complete the daily health acknowledgement form, signed electronically; an email is generated daily to those listed on your plan for access to on-Grounds lab/research space
|
||||
- Fill out the daily work attendance log for all lab members, following your school's process to check in and out of work each day.</bpmn:documentation>
|
||||
<bpmn:incoming>Flow_08njvvi</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0j4rs82</bpmn:outgoing>
|
||||
</bpmn:manualTask>
|
||||
<bpmn:sequenceFlow id="Flow_0zrsh65" sourceRef="Activity_PWA" targetRef="Gateway_0sijkgx" />
|
||||
<bpmn:sequenceFlow id="Flow_0tz5c2v" sourceRef="Activity_DistanceReq" targetRef="Gateway_0sijkgx" />
|
||||
<bpmn:sequenceFlow id="Flow_1ox5nv6" sourceRef="Activity_HSR" targetRef="Gateway_0sijkgx" />
|
||||
<bpmn:sequenceFlow id="Flow_0qbi47d" sourceRef="Activity_OtherReq" targetRef="Gateway_0sijkgx" />
|
||||
<bpmn:sequenceFlow id="Flow_06873ag" sourceRef="Gateway_0sijkgx" targetRef="Activity_1tub2mc" />
|
||||
<bpmn:parallelGateway id="Gateway_0sijkgx">
|
||||
<bpmn:incoming>Flow_0zrsh65</bpmn:incoming>
|
||||
<bpmn:incoming>Flow_0tz5c2v</bpmn:incoming>
|
||||
<bpmn:incoming>Flow_1ox5nv6</bpmn:incoming>
|
||||
<bpmn:incoming>Flow_0qbi47d</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_06873ag</bpmn:outgoing>
|
||||
</bpmn:parallelGateway>
|
||||
<bpmn:scriptTask id="Activity_1tub2mc" name="Generate RRP">
|
||||
<bpmn:incoming>Flow_06873ag</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0aqgwvu</bpmn:outgoing>
|
||||
<bpmn:script>CompleteTemplate ResearchRampUpPlan.docx RESEARCH_RAMPUP</bpmn:script>
|
||||
</bpmn:scriptTask>
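<!-- Assumption (inferred from the arguments, not defined in this file): CompleteTemplate is an application-provided script command that fills the named Word template (ResearchRampUpPlan.docx) with the collected form data and files it under the RESEARCH_RAMPUP document code. -->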
|
||||
<bpmn:sequenceFlow id="Flow_0aqgwvu" sourceRef="Activity_1tub2mc" targetRef="Activity_AcknowledgePlanReview" />
|
||||
<bpmn:sequenceFlow id="Flow_0j4rs82" sourceRef="Activity_SubmitPlan" targetRef="Activity_0absozl" />
|
||||
<bpmn:sequenceFlow id="Flow_07ge8uf" sourceRef="Activity_0absozl" targetRef="Activity_RequestStatus" />
|
||||
<bpmn:sequenceFlow id="Flow_1ufh44h" sourceRef="Activity_RequestStatus" targetRef="Activity_AreaMonitorNotification" />
|
||||
<bpmn:userTask id="Activity_RequestStatus" name="Check-on Request Status" camunda:formKey="Rquest Status">
|
||||
<bpmn:documentation>#### Approval Process
|
||||
The Research Ramp-up Plan and associated documents will be reviewed by{{ " " + ApprvlApprvrName1 }}{{ '.' if ApprvlApprvrName2 == 'n/a' else ' and ' + ApprvlApprvrName2 + '.' }} While waiting for approval, be sure that all required training has been completed and supplies secured. When the approval email notification is received, confirming the three questions below will allow you to proceed.
|
||||
|
||||
|
||||
If a rejection notification is received, go back to the first step that needs to be addressed and step through each subsequent form from that point.</bpmn:documentation>
|
||||
<bpmn:extensionElements>
|
||||
<camunda:formData>
|
||||
<camunda:formField id="ApprovalReceived" label="Please Confirm:" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="enum_type" value="checkbox" />
|
||||
</camunda:properties>
|
||||
<camunda:value id="ApprovalNotificationReceived" name="Approval Notification Received?" />
|
||||
</camunda:formField>
|
||||
<camunda:formField id="RequiredTraining" label="Please Confirm:" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="enum_type" value="checkbox" />
|
||||
</camunda:properties>
|
||||
<camunda:value id="AllRequiredTraining" name="All Required Training Completed?" />
|
||||
</camunda:formField>
|
||||
<camunda:formField id="NeededSupplies" label="Please Confirm"" type="enum">
|
||||
<camunda:properties>
|
||||
<camunda:property id="enum_type" value="checkbox" />
|
||||
</camunda:properties>
|
||||
<camunda:value id="NeededSupplies" name="All Needed Supplies Secured?" />
|
||||
</camunda:formField>
|
||||
</camunda:formData>
|
||||
</bpmn:extensionElements>
|
||||
<bpmn:incoming>Flow_07ge8uf</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_1ufh44h</bpmn:outgoing>
|
||||
</bpmn:userTask>
|
||||
<bpmn:sequenceFlow id="Flow_05w8yd6" sourceRef="Activity_WhatNext" targetRef="EndEvent_09wp7av" />
|
||||
<bpmn:sequenceFlow id="Flow_08njvvi" sourceRef="Activity_ApprovalInfo" targetRef="Activity_SubmitPlan" />
|
||||
<bpmn:businessRuleTask id="Activity_ApprovalInfo" name="Assign Approval Info" camunda:decisionRef="Decision_ApprovalInfo">
|
||||
<bpmn:incoming>Flow_1e2qi9s</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_08njvvi</bpmn:outgoing>
|
||||
</bpmn:businessRuleTask>
|
||||
<bpmn:manualTask id="Activity_WhatNext" name="Review What's Next?">
|
||||
<bpmn:documentation>### Stay Here, In Case Something Changes
|
||||
If anything changes in the Research Ramp-up Plan submitted, click **Start over** at the top of the menu and step through each form, updating as needed. Data will be preserved, so if no changes are needed on a specific form, click Save and proceed.
|
||||
|
||||
|
||||
If notification is received that the Research Ramp-up Plan approval process is no longer required, click Continue to close out the workflow process.</bpmn:documentation>
|
||||
<bpmn:incoming>Flow_0cpmvcw</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_05w8yd6</bpmn:outgoing>
|
||||
</bpmn:manualTask>
|
||||
<bpmn:sequenceFlow id="Flow_0cpmvcw" sourceRef="Activity_AreaMonitorNotification" targetRef="Activity_WhatNext" />
|
||||
<bpmn:manualTask id="Activity_AreaMonitorNotification" name="Send Area Monitor Notification">
|
||||
<bpmn:documentation>#### Ready to Ramp-up Research
|
||||
Notify the Area Monitors for the spaces listed below:
|
||||
|
||||
|
||||
#### Exclusive Space Area Monitors
|
||||
{% for es in exclusive %}
|
||||
{{ es.ExclusiveSpaceAMComputingID.data.display_name }}
|
||||
{% else %}
|
||||
No exclusive space entered
|
||||
{% endfor %}
|
||||
|
||||
|
||||
#### Shared Space Area Monitors
|
||||
{% for ss in shared %}
|
||||
{{ ss.SharedSpaceAMComputingID.data.display_name }}
|
||||
{% else %}
|
||||
No shared space entered
|
||||
{% endfor %}</bpmn:documentation>
|
||||
<bpmn:incoming>Flow_1ufh44h</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_0cpmvcw</bpmn:outgoing>
|
||||
</bpmn:manualTask>
|
||||
<bpmn:scriptTask id="Activity_0absozl" name="Execute Plan Submission">
|
||||
<bpmn:incoming>Flow_0j4rs82</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_07ge8uf</bpmn:outgoing>
|
||||
<bpmn:script>RequestApproval ApprvlApprvr1 ApprvlApprvr2</bpmn:script>
|
||||
</bpmn:scriptTask>
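<!-- Assumption (inferred from the argument names): RequestApproval creates approval requests for the two approvers resolved by the Decision_ApprovalInfo table (ApprvlApprvr1 / ApprvlApprvr2). -->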
|
||||
<bpmn:sequenceFlow id="Flow_1v7r1tg" sourceRef="Activity_1u58hox" targetRef="Gateway_0frfdnc" />
|
||||
<bpmn:scriptTask id="Activity_1u58hox" name="Update Request">
|
||||
<bpmn:incoming>Flow_16y8glw</bpmn:incoming>
|
||||
<bpmn:outgoing>Flow_1v7r1tg</bpmn:outgoing>
|
||||
<bpmn:script>UpdateStudy title:PIComputingID.label pi:PIComputingID.value</bpmn:script>
|
||||
</bpmn:scriptTask>
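<!-- Assumption (inferred from the command syntax): UpdateStudy writes key:value pairs onto the study record, here setting the study title and PI from the selected PIComputingID. -->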
|
||||
</bpmn:process>
|
||||
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
|
||||
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_0ssahs9">
|
||||
<bpmndi:BPMNEdge id="Flow_1v7r1tg_di" bpmnElement="Flow_1v7r1tg">
|
||||
<di:waypoint x="630" y="307" />
|
||||
<di:waypoint x="685" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0cpmvcw_di" bpmnElement="Flow_0cpmvcw">
|
||||
<di:waypoint x="2470" y="307" />
|
||||
<di:waypoint x="2520" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_08njvvi_di" bpmnElement="Flow_08njvvi">
|
||||
<di:waypoint x="1900" y="307" />
|
||||
<di:waypoint x="1930" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_05w8yd6_di" bpmnElement="Flow_05w8yd6">
|
||||
<di:waypoint x="2620" y="307" />
|
||||
<di:waypoint x="2692" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1ufh44h_di" bpmnElement="Flow_1ufh44h">
|
||||
<di:waypoint x="2330" y="307" />
|
||||
<di:waypoint x="2370" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_07ge8uf_di" bpmnElement="Flow_07ge8uf">
|
||||
<di:waypoint x="2180" y="307" />
|
||||
<di:waypoint x="2230" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0j4rs82_di" bpmnElement="Flow_0j4rs82">
|
||||
<di:waypoint x="2030" y="307" />
|
||||
<di:waypoint x="2080" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0aqgwvu_di" bpmnElement="Flow_0aqgwvu">
|
||||
<di:waypoint x="1640" y="307" />
|
||||
<di:waypoint x="1670" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_06873ag_di" bpmnElement="Flow_06873ag">
|
||||
<di:waypoint x="1495" y="307" />
|
||||
<di:waypoint x="1540" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0qbi47d_di" bpmnElement="Flow_0qbi47d">
|
||||
<di:waypoint x="1400" y="510" />
|
||||
<di:waypoint x="1470" y="510" />
|
||||
<di:waypoint x="1470" y="332" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1ox5nv6_di" bpmnElement="Flow_1ox5nv6">
|
||||
<di:waypoint x="1400" y="380" />
|
||||
<di:waypoint x="1470" y="380" />
|
||||
<di:waypoint x="1470" y="332" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0tz5c2v_di" bpmnElement="Flow_0tz5c2v">
|
||||
<di:waypoint x="1400" y="120" />
|
||||
<di:waypoint x="1470" y="120" />
|
||||
<di:waypoint x="1470" y="282" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0zrsh65_di" bpmnElement="Flow_0zrsh65">
|
||||
<di:waypoint x="1400" y="240" />
|
||||
<di:waypoint x="1470" y="240" />
|
||||
<di:waypoint x="1470" y="282" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1c6m5wv_di" bpmnElement="Flow_1c6m5wv">
|
||||
<di:waypoint x="1230" y="332" />
|
||||
<di:waypoint x="1230" y="510" />
|
||||
<di:waypoint x="1300" y="510" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1yqkpgu_di" bpmnElement="Flow_1yqkpgu">
|
||||
<di:waypoint x="1230" y="332" />
|
||||
<di:waypoint x="1230" y="380" />
|
||||
<di:waypoint x="1300" y="380" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0mkh1wn_di" bpmnElement="Flow_0mkh1wn">
|
||||
<di:waypoint x="1230" y="282" />
|
||||
<di:waypoint x="1230" y="240" />
|
||||
<di:waypoint x="1300" y="240" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0p2r1bo_di" bpmnElement="Flow_0p2r1bo">
|
||||
<di:waypoint x="1230" y="282" />
|
||||
<di:waypoint x="1230" y="120" />
|
||||
<di:waypoint x="1300" y="120" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1nbjr72_di" bpmnElement="Flow_1nbjr72">
|
||||
<di:waypoint x="1150" y="307" />
|
||||
<di:waypoint x="1205" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1eiud85_di" bpmnElement="Flow_1eiud85">
|
||||
<di:waypoint x="995" y="307" />
|
||||
<di:waypoint x="1050" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0zz2hbq_di" bpmnElement="Flow_0zz2hbq">
|
||||
<di:waypoint x="890" y="510" />
|
||||
<di:waypoint x="970" y="510" />
|
||||
<di:waypoint x="970" y="332" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0ya8hw8_di" bpmnElement="Flow_0ya8hw8">
|
||||
<di:waypoint x="710" y="332" />
|
||||
<di:waypoint x="710" y="510" />
|
||||
<di:waypoint x="790" y="510" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_12ie6w0_di" bpmnElement="Flow_12ie6w0">
|
||||
<di:waypoint x="890" y="370" />
|
||||
<di:waypoint x="970" y="370" />
|
||||
<di:waypoint x="970" y="332" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_15zy1q7_di" bpmnElement="Flow_15zy1q7">
|
||||
<di:waypoint x="710" y="332" />
|
||||
<di:waypoint x="710" y="370" />
|
||||
<di:waypoint x="790" y="370" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0tk64b6_di" bpmnElement="Flow_0tk64b6">
|
||||
<di:waypoint x="890" y="110" />
|
||||
<di:waypoint x="970" y="110" />
|
||||
<di:waypoint x="970" y="282" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_0qf2y84_di" bpmnElement="Flow_0qf2y84">
|
||||
<di:waypoint x="710" y="282" />
|
||||
<di:waypoint x="710" y="110" />
|
||||
<di:waypoint x="790" y="110" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_16y8glw_di" bpmnElement="Flow_16y8glw">
|
||||
<di:waypoint x="480" y="307" />
|
||||
<di:waypoint x="530" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_16342pm_di" bpmnElement="Flow_16342pm">
|
||||
<di:waypoint x="890" y="240" />
|
||||
<di:waypoint x="970" y="240" />
|
||||
<di:waypoint x="970" y="282" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_19xeq76_di" bpmnElement="Flow_19xeq76">
|
||||
<di:waypoint x="710" y="282" />
|
||||
<di:waypoint x="710" y="240" />
|
||||
<di:waypoint x="790" y="240" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="Flow_1e2qi9s_di" bpmnElement="Flow_1e2qi9s">
|
||||
<di:waypoint x="1770" y="307" />
|
||||
<di:waypoint x="1800" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="SequenceFlow_05ja25w_di" bpmnElement="SequenceFlow_05ja25w">
|
||||
<di:waypoint x="168" y="307" />
|
||||
<di:waypoint x="230" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNEdge id="SequenceFlow_0h50bp3_di" bpmnElement="SequenceFlow_0h50bp3">
|
||||
<di:waypoint x="330" y="307" />
|
||||
<di:waypoint x="380" y="307" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
|
||||
<dc:Bounds x="132" y="289" width="36" height="36" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="ManualTask_1ofy9yz_di" bpmnElement="ManualTask_Instructions">
|
||||
<dc:Bounds x="230" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="UserTask_0xdpoxl_di" bpmnElement="Activity-PI_Info">
|
||||
<dc:Bounds x="380" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="UserTask_0ecab9j_di" bpmnElement="Personnel">
|
||||
<dc:Bounds x="1050" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="UserTask_0l8vxty_di" bpmnElement="UserTask_CoreResource">
|
||||
<dc:Bounds x="790" y="330" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="EndEvent_09wp7av_di" bpmnElement="EndEvent_09wp7av">
|
||||
<dc:Bounds x="2692" y="289" width="36" height="36" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_1mg5lp9_di" bpmnElement="Activity_AcknowledgePlanReview">
|
||||
<dc:Bounds x="1670" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_1xgrlzr_di" bpmnElement="Activity_SharedSpaceInfo">
|
||||
<dc:Bounds x="790" y="200" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Gateway_0tn2il3_di" bpmnElement="Gateway_0frfdnc">
|
||||
<dc:Bounds x="685" y="282" width="50" height="50" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Gateway_1o1fcbg_di" bpmnElement="Gateway_1vj4zd3">
|
||||
<dc:Bounds x="945" y="282" width="50" height="50" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_1jefdme_di" bpmnElement="Activity_ExclusiveSpace">
|
||||
<dc:Bounds x="790" y="70" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0ysw6zo_di" bpmnElement="Activity_nonUVASpaces">
|
||||
<dc:Bounds x="790" y="470" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_1xag6qb_di" bpmnElement="Activity_DistanceReq">
|
||||
<dc:Bounds x="1300" y="80" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Gateway_0yof76x_di" bpmnElement="Gateway_18jn18b">
|
||||
<dc:Bounds x="1205" y="282" width="50" height="50" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_166b0qq_di" bpmnElement="Activity_PWA">
|
||||
<dc:Bounds x="1300" y="200" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0byj9mp_di" bpmnElement="Activity_HSR">
|
||||
<dc:Bounds x="1300" y="340" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0j0p13i_di" bpmnElement="Activity_OtherReq">
|
||||
<dc:Bounds x="1300" y="470" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_1h30fo8_di" bpmnElement="Activity_SubmitPlan">
|
||||
<dc:Bounds x="1930" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Gateway_03lxnzh_di" bpmnElement="Gateway_0sijkgx">
|
||||
<dc:Bounds x="1445" y="282" width="50" height="50" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_117owwi_di" bpmnElement="Activity_1tub2mc">
|
||||
<dc:Bounds x="1540" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0fxf44t_di" bpmnElement="Activity_RequestStatus">
|
||||
<dc:Bounds x="2230" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_080o38p_di" bpmnElement="Activity_ApprovalInfo">
|
||||
<dc:Bounds x="1800" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0wuukfn_di" bpmnElement="Activity_WhatNext">
|
||||
<dc:Bounds x="2520" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0js9ww9_di" bpmnElement="Activity_AreaMonitorNotification">
|
||||
<dc:Bounds x="2370" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0wnn9de_di" bpmnElement="Activity_0absozl">
|
||||
<dc:Bounds x="2080" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="Activity_0f0ak6p_di" bpmnElement="Activity_1u58hox">
|
||||
<dc:Bounds x="530" y="267" width="100" height="80" />
|
||||
</bpmndi:BPMNShape>
|
||||
</bpmndi:BPMNPlane>
|
||||
</bpmndi:BPMNDiagram>
|
||||
</bpmn:definitions>
|
|
@ -0,0 +1,26 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1v1rp1q" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
|
||||
<bpmn:process id="empty_workflow" isExecutable="true">
|
||||
<bpmn:startEvent id="StartEvent_1">
|
||||
<bpmn:outgoing>SequenceFlow_0lvudp8</bpmn:outgoing>
|
||||
</bpmn:startEvent>
|
||||
<bpmn:sequenceFlow id="SequenceFlow_0lvudp8" sourceRef="StartEvent_1" targetRef="EndEvent_0q4qzl9" />
|
||||
<bpmn:endEvent id="EndEvent_0q4qzl9">
|
||||
<bpmn:incoming>SequenceFlow_0lvudp8</bpmn:incoming>
|
||||
</bpmn:endEvent>
|
||||
</bpmn:process>
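<!-- Editor's note: a minimal no-op workflow in which the start event flows directly to the end event; presumably a placeholder/default model (inferred from the process id "empty_workflow"). -->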
|
||||
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
|
||||
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="empty_workflow">
|
||||
<bpmndi:BPMNEdge id="SequenceFlow_0lvudp8_di" bpmnElement="SequenceFlow_0lvudp8">
|
||||
<di:waypoint x="238" y="117" />
|
||||
<di:waypoint x="432" y="117" />
|
||||
</bpmndi:BPMNEdge>
|
||||
<bpmndi:BPMNShape id="EndEvent_0q4qzl9_di" bpmnElement="EndEvent_0q4qzl9">
|
||||
<dc:Bounds x="432" y="99" width="36" height="36" />
|
||||
</bpmndi:BPMNShape>
|
||||
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
|
||||
<dc:Bounds x="202" y="99" width="36" height="36" />
|
||||
</bpmndi:BPMNShape>
|
||||
</bpmndi:BPMNPlane>
|
||||
</bpmndi:BPMNDiagram>
|
||||
</bpmn:definitions>
|
|
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1x1akiz" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1x1akiz" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="Process_0quormc" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_17znkku</bpmn:outgoing>

@@ -16,10 +16,9 @@
</camunda:formField>
<camunda:formField id="FormField_FromOSP" label="From OSP" type="autocomplete">
<camunda:properties>
<camunda:property id="enum.options.file" value="sponsors.xls" />
<camunda:property id="enum.options.value.column" value="CUSTOMER_NUMBER" />
<camunda:property id="enum.options.label.column" value="CUSTOMER_NAME" />
<camunda:property id="enum.options.lookup" value="True" />
<camunda:property id="spreadsheet.name" value="sponsors.xls" />
<camunda:property id="spreadsheet.value.column" value="CUSTOMER_NUMBER" />
<camunda:property id="spreadsheet.label.column" value="CUSTOMER_NAME" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="FormField_Type" label="Select all that apply:" type="enum">

@@ -70,51 +69,51 @@
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_0quormc">
<bpmndi:BPMNEdge id="SequenceFlow_1n3utyf_di" bpmnElement="SequenceFlow_1n3utyf">
<di:waypoint x="430" y="40" />
<di:waypoint x="490" y="40" />
<di:waypoint x="490" y="92" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_17znkku_di" bpmnElement="SequenceFlow_17znkku">
<di:waypoint x="188" y="117" />
<di:waypoint x="235" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_13604n2_di" bpmnElement="Flow_13604n2">
<di:waypoint x="260" y="92" />
<di:waypoint x="260" y="40" />
<di:waypoint x="330" y="40" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_030v94s_di" bpmnElement="Flow_030v94s">
<di:waypoint x="515" y="117" />
<di:waypoint x="552" y="117" />
<bpmndi:BPMNEdge id="Flow_1l3gw28_di" bpmnElement="Flow_1l3gw28">
<di:waypoint x="430" y="280" />
<di:waypoint x="490" y="280" />
<di:waypoint x="490" y="222" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_0hdjgx6_di" bpmnElement="Flow_0hdjgx6">
<di:waypoint x="260" y="142" />
<di:waypoint x="260" y="200" />
<di:waypoint x="330" y="200" />
<di:waypoint x="260" y="222" />
<di:waypoint x="260" y="280" />
<di:waypoint x="330" y="280" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1l3gw28_di" bpmnElement="Flow_1l3gw28">
<di:waypoint x="430" y="200" />
<di:waypoint x="490" y="200" />
<di:waypoint x="490" y="142" />
<bpmndi:BPMNEdge id="Flow_030v94s_di" bpmnElement="Flow_030v94s">
<di:waypoint x="515" y="197" />
<di:waypoint x="552" y="197" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="EndEvent_19upzzo_di" bpmnElement="EndEvent_19upzzo">
<dc:Bounds x="552" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Gateway_1s4ro2h_di" bpmnElement="Gateway_0bgimhg">
<dc:Bounds x="235" y="92" width="50" height="50" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Gateway_1kowkjp_di" bpmnElement="Gateway_1924s77">
<dc:Bounds x="465" y="92" width="50" height="50" />
<bpmndi:BPMNEdge id="Flow_13604n2_di" bpmnElement="Flow_13604n2">
<di:waypoint x="260" y="172" />
<di:waypoint x="260" y="120" />
<di:waypoint x="330" y="120" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_1n3utyf_di" bpmnElement="SequenceFlow_1n3utyf">
<di:waypoint x="430" y="120" />
<di:waypoint x="490" y="120" />
<di:waypoint x="490" y="172" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_17znkku_di" bpmnElement="SequenceFlow_17znkku">
<di:waypoint x="188" y="197" />
<di:waypoint x="235" y="197" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="152" y="179" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="UserTask_15oiwqt_di" bpmnElement="Task_14cuhvm">
<dc:Bounds x="330" y="0" width="100" height="80" />
<dc:Bounds x="330" y="80" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_19upzzo_di" bpmnElement="EndEvent_19upzzo">
<dc:Bounds x="552" y="179" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_1oeywwl_di" bpmnElement="Activity_0xxhfyh">
<dc:Bounds x="330" y="160" width="100" height="80" />
<dc:Bounds x="330" y="240" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="152" y="99" width="36" height="36" />
<bpmndi:BPMNShape id="Gateway_1s4ro2h_di" bpmnElement="Gateway_0bgimhg">
<dc:Bounds x="235" y="172" width="50" height="50" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Gateway_1kowkjp_di" bpmnElement="Gateway_1924s77">
<dc:Bounds x="465" y="172" width="50" height="50" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>

Binary file not shown.
@@ -0,0 +1,15 @@
python_home = '/usr/local/envs/crcpython3'

import os
import site  # required for site.addsitedir below
import sys

# Calculate path to site-packages directory.

python_version = '.'.join(map(str, sys.version_info[:2]))
site_packages = python_home + '/lib/python%s/site-packages' % python_version

# Add the site-packages directory.

site.addsitedir(site_packages)

from crc import app as application

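This new file looks like a mod_wsgi-style entry point: python_home points at a dedicated virtualenv, that environment's site-packages directory is added to the module search path, and the Flask app is exposed under the name "application" that WSGI servers expect.
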
@@ -0,0 +1,90 @@
alabaster==0.7.12
alembic==1.4.2
amqp==2.5.2
aniso8601==8.0.0
attrs==19.3.0
babel==2.8.0
bcrypt==3.1.7
beautifulsoup4==4.9.1
billiard==3.6.3.0
blinker==1.4
celery==4.4.2
certifi==2020.4.5.1
cffi==1.14.0
chardet==3.0.4
click==7.1.2
clickclick==1.2.2
commonmark==0.9.1
configparser==5.0.0
connexion==2.7.0
coverage==5.1
docutils==0.16
docxtpl==0.9.2
et-xmlfile==1.0.1
flask==1.1.2
flask-bcrypt==0.7.1
flask-cors==3.0.8
flask-marshmallow==0.12.0
flask-migrate==2.5.3
flask-restful==0.3.8
flask-sqlalchemy==2.4.1
flask-sso==0.4.0
future==0.18.2
httpretty==1.0.2
idna==2.9
imagesize==1.2.0
importlib-metadata==1.6.0
inflection==0.4.0
itsdangerous==1.1.0
jdcal==1.4.1
jinja2==2.11.2
jsonschema==3.2.0
kombu==4.6.8
ldap3==2.7
lxml==4.5.1
mako==1.1.2
markupsafe==1.1.1
marshmallow==3.6.0
marshmallow-enum==1.5.1
marshmallow-sqlalchemy==0.23.0
numpy==1.18.4
openapi-spec-validator==0.2.8
openpyxl==3.0.3
packaging==20.4
pandas==1.0.3
psycopg2-binary==2.8.5
pyasn1==0.4.8
pycparser==2.20
pygments==2.6.1
pyjwt==1.7.1
pyparsing==2.4.7
pyrsistent==0.16.0
python-dateutil==2.8.1
python-docx==0.8.10
python-editor==1.0.4
pytz==2020.1
pyyaml==5.3.1
recommonmark==0.6.0
requests==2.23.0
six==1.14.0
snowballstemmer==2.0.0
soupsieve==2.0.1
sphinx==3.0.3
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==1.0.3
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.4
spiffworkflow
sqlalchemy==1.3.17
swagger-ui-bundle==0.0.6
urllib3==1.25.9
vine==1.3.0
waitress==1.4.3
webob==1.8.6
webtest==2.0.35
werkzeug==1.0.1
xlrd==1.2.0
xlsxwriter==1.2.8
zipp==3.1.0

@@ -0,0 +1,4 @@
jq -r '.default
| to_entries[]
| .key + .value.version' \
../Pipfile.lock > requirements.txt

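The jq one-liner above exports the locked, non-dev dependencies from Pipfile.lock into the requirements.txt file added in this commit. For reference, a rough Python equivalent is sketched below; it is illustrative only and not part of the commit (the file paths are taken from the script above):

import json

# Read the locked, non-dev ("default") dependencies from Pipfile.lock.
with open("../Pipfile.lock") as fh:
    lock = json.load(fh)

with open("requirements.txt", "w") as out:
    for name, meta in lock["default"].items():
        # Pinned entries carry a version string like "==1.1.2"; VCS entries
        # (e.g. spiffworkflow) have no version and are written bare, which
        # matches the jq expression `.key + .value.version`.
        out.write(name + meta.get("version", "") + "\n")
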
@@ -1,23 +1,30 @@
#!/bin/bash

# run migrations
export FLASK_APP=./crc/__init__.py

for entry in ./instance/* ; do
  echo "$entry"
  cat $entry
done
export FLASK_APP=/app/crc/__init__.py

if [ "$DOWNGRADE_DB" = "true" ]; then
  echo 'Downgrading...'
  echo 'Downgrading database...'
  pipenv run flask db downgrade
fi

pipenv run flask db upgrade
if [ "$UPGRADE_DB" = "true" ]; then
  echo 'Upgrading database...'
  pipenv run flask db upgrade
fi

if [ "$RESET_DB" = "true" ]; then
  echo 'Resetting database...'
  echo 'Resetting database and seeding it with example CR Connect data...'
  pipenv run flask load-example-data
fi

pipenv run python ./run.py
if [ "$RESET_DB_RRT" = "true" ]; then
  echo 'Resetting database and seeding it with example RRT data...'
  pipenv run flask load-example-rrt-data
fi

if [ "$APPLICATION_ROOT" = "/" ]; then
  pipenv run gunicorn --bind 0.0.0.0:$PORT0 wsgi:app
else
  pipenv run gunicorn -e SCRIPT_NAME="$APPLICATION_ROOT" --bind 0.0.0.0:$PORT0 wsgi:app
fi

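The reworked entry point makes every database step opt-in through environment variables (DOWNGRADE_DB, UPGRADE_DB, RESET_DB, RESET_DB_RRT) instead of always upgrading and seeding, and it finishes by serving the app with gunicorn rather than the Flask development server, passing APPLICATION_ROOT through SCRIPT_NAME when the app is mounted under a subpath.
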
@@ -14,7 +14,8 @@ class ExampleDataLoader:
        session.flush()  # Clear out any transactions before deleting it all to avoid spurious errors.
        for table in reversed(db.metadata.sorted_tables):
            session.execute(table.delete())
        session.flush()
        session.commit()
        session.flush()

    def load_all(self):

@@ -191,8 +192,68 @@ class ExampleDataLoader:
                         category_id=None,
                         master_spec=True)

    def load_rrt(self):
        file_path = os.path.join(app.root_path, 'static', 'reference', 'rrt_documents.xlsx')
        file = open(file_path, "rb")
        FileService.add_reference_file(FileService.DOCUMENT_LIST,
                                       binary_data=file.read(),
                                       content_type=CONTENT_TYPES['xls'])
        file.close()

    def create_spec(self, id, name, display_name="", description="", filepath=None, master_spec=False, category_id=None, display_order=None):
        category = WorkflowSpecCategoryModel(
            id=0,
            name='research_rampup_category',
            display_name='Research Ramp-up Category',
            display_order=0
        )
        db.session.add(category)
        db.session.commit()

        self.create_spec(id="rrt_top_level_workflow",
                         name="rrt_top_level_workflow",
                         display_name="Top Level Workflow",
                         description="Does nothing, we don't use the master workflow here.",
                         category_id=None,
                         master_spec=True)

        self.create_spec(id="research_rampup",
                         name="research_rampup",
                         display_name="Research Ramp-up Toolkit",
                         description="Process for creating a new research ramp-up request.",
                         category_id=0,
                         master_spec=False)

    def load_test_data(self):
        self.load_reference_documents()

        category = WorkflowSpecCategoryModel(
            id=0,
            name='test_category',
            display_name='Test Category',
            display_order=0
        )
        db.session.add(category)
        db.session.commit()

        self.create_spec(id="empty_workflow",
                         name="empty_workflow",
                         display_name="Top Level Workflow",
                         description="Does nothing, we don't use the master workflow here.",
                         category_id=None,
                         master_spec=True,
                         from_tests=True)

        self.create_spec(id="random_fact",
                         name="random_fact",
                         display_name="Random Fact",
                         description="The workflow for a Random Fact.",
                         category_id=0,
                         master_spec=False,
                         from_tests=True)


    def create_spec(self, id, name, display_name="", description="", filepath=None, master_spec=False,
                    category_id=None, display_order=None, from_tests=False):
        """Assumes that a directory exists in static/bpmn with the same name as the given id.
        Further assumes that the [id].bpmn is the primary file for the workflow.
        Returns an array of data models to be added to the database."""

@@ -207,8 +268,11 @@ class ExampleDataLoader:
                                display_order=display_order)
        db.session.add(spec)
        db.session.commit()
        if not filepath:
        if not filepath and not from_tests:
            filepath = os.path.join(app.root_path, 'static', 'bpmn', id, "*")
        if not filepath and from_tests:
            filepath = os.path.join(app.root_path, '..', 'tests', 'data', id, "*")

        files = glob.glob(filepath)
        for file_path in files:
            noise, file_extension = os.path.splitext(file_path)

@@ -0,0 +1,34 @@
"""empty message

Revision ID: 23c62c933848
Revises: 9b43e725f39c
Create Date: 2020-05-28 10:30:49.409760

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '23c62c933848'
down_revision = '9b43e725f39c'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('file_study_id_fkey', 'file', type_='foreignkey')
    op.drop_column('file', 'task_id')
    op.drop_column('file', 'study_id')
    op.drop_column('file', 'form_field_key')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('file', sa.Column('form_field_key', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('file', sa.Column('study_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('file', sa.Column('task_id', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.create_foreign_key('file_study_id_fkey', 'file', 'study', ['study_id'], ['id'])
    # ### end Alembic commands ###

@@ -0,0 +1,36 @@
"""empty message

Revision ID: 5064b72284b7
Revises: bec71f7dc652
Create Date: 2020-05-28 23:54:45.623361

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '5064b72284b7'
down_revision = 'bec71f7dc652'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('lookup_file', sa.Column('field_id', sa.String(), nullable=True))
    op.add_column('lookup_file', sa.Column('is_ldap', sa.Boolean(), nullable=True))
    op.add_column('lookup_file', sa.Column('workflow_spec_id', sa.String(), nullable=True))
    op.drop_column('lookup_file', 'value_column')
    op.drop_column('lookup_file', 'label_column')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('lookup_file', sa.Column('label_column', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('lookup_file', sa.Column('value_column', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_column('lookup_file', 'workflow_spec_id')
    op.drop_column('lookup_file', 'is_ldap')
    op.drop_column('lookup_file', 'field_id')
    # ### end Alembic commands ###

@@ -0,0 +1,39 @@
"""empty message

Revision ID: 55c6cd407d89
Revises: cc4bccc5e5a8
Create Date: 2020-05-22 22:02:46.650170

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '55c6cd407d89'
down_revision = 'cc4bccc5e5a8'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('approval',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('study_id', sa.Integer(), nullable=False),
    sa.Column('workflow_id', sa.Integer(), nullable=False),
    sa.Column('workflow_version', sa.String(), nullable=True),
    sa.Column('approver_uid', sa.String(), nullable=True),
    sa.Column('status', sa.String(), nullable=True),
    sa.Column('message', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['study_id'], ['study.id'], ),
    sa.ForeignKeyConstraint(['workflow_id'], ['workflow.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('approval')
    # ### end Alembic commands ###

@@ -0,0 +1,44 @@
"""empty message

Revision ID: 9b43e725f39c
Revises: 55c6cd407d89
Create Date: 2020-05-25 23:09:14.761831

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '9b43e725f39c'
down_revision = '55c6cd407d89'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('approval_file',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('file_id', sa.Integer(), nullable=False),
    sa.Column('approval_id', sa.Integer(), nullable=False),
    sa.Column('file_version', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['approval_id'], ['approval.id'], ),
    sa.ForeignKeyConstraint(['file_id'], ['file.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column('approval', sa.Column('date_created', sa.DateTime(timezone=True), nullable=True))
    op.add_column('approval', sa.Column('version', sa.Integer(), nullable=True))
    op.add_column('approval', sa.Column('workflow_hash', sa.String(), nullable=True))
    op.drop_column('approval', 'workflow_version')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('approval', sa.Column('workflow_version', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_column('approval', 'workflow_hash')
    op.drop_column('approval', 'version')
    op.drop_column('approval', 'date_created')
    op.drop_table('approval_file')
    # ### end Alembic commands ###

@@ -0,0 +1,63 @@
"""empty message

Revision ID: bec71f7dc652
Revises: 23c62c933848
Create Date: 2020-05-28 20:08:45.891406

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = 'bec71f7dc652'
down_revision = '23c62c933848'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###

    op.create_table('workflow_spec_dependency_file',
    sa.Column('file_data_id', sa.Integer(), nullable=False),
    sa.Column('workflow_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['file_data_id'], ['file_data.id'], ),
    sa.ForeignKeyConstraint(['workflow_id'], ['workflow.id'], ),
    sa.PrimaryKeyConstraint('file_data_id', 'workflow_id')
    )
    op.drop_column('approval', 'workflow_hash')
    op.execute(
        """
        delete from approval_file;
        delete from approval;
        """
    )
    op.add_column('approval_file', sa.Column('file_data_id', sa.Integer(), nullable=False))
    op.drop_constraint('approval_file_file_id_fkey', 'approval_file', type_='foreignkey')
    op.create_foreign_key(None, 'approval_file', 'file_data', ['file_data_id'], ['id'])
    op.drop_column('approval_file', 'id')
    op.drop_column('approval_file', 'file_version')
    op.drop_column('approval_file', 'file_id')
    op.drop_column('file', 'latest_version')
    op.add_column('file_data', sa.Column('date_created', sa.DateTime(timezone=True), nullable=True))
    op.drop_column('file_data', 'last_updated')
    op.drop_column('workflow', 'spec_version')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('workflow', sa.Column('spec_version', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('file_data', sa.Column('last_updated', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True))
    op.drop_column('file_data', 'date_created')
    op.add_column('file', sa.Column('latest_version', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('approval_file', sa.Column('file_id', sa.INTEGER(), autoincrement=False, nullable=False))
    op.add_column('approval_file', sa.Column('file_version', sa.INTEGER(), autoincrement=False, nullable=False))
    op.add_column('approval_file', sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False))
    op.drop_constraint(None, 'approval_file', type_='foreignkey')
    op.create_foreign_key('approval_file_file_id_fkey', 'approval_file', 'file', ['file_id'], ['id'])
    op.drop_column('approval_file', 'file_data_id')
    op.add_column('approval', sa.Column('workflow_hash', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_table('workflow_spec_dependency_file')
    # ### end Alembic commands ###

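A note on the upgrade above: because approval_file is re-keyed from a (file_id, file_version) pair to a non-nullable file_data_id, existing rows cannot be carried across, so the migration clears both approval tables before adding the column. This appears intentional (approvals would be re-requested), but it is worth flagging as a data-destroying upgrade.
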
@@ -0,0 +1,3 @@
from setuptools import setup

setup(setup_requires=["pbr"], pbr=True)

@@ -1,7 +1,12 @@
# Set environment variable to testing before loading.
# IMPORTANT - Environment must be loaded before app, models, etc....
import json
import os

from sqlalchemy import Sequence

os.environ["TESTING"] = "true"

import json
import unittest
import urllib.parse
import datetime

@@ -10,10 +15,6 @@ from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudyModel
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor

os.environ["TESTING"] = "true"

from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
from crc.models.user import UserModel

@@ -21,10 +22,9 @@ from crc.models.user import UserModel
from crc import app, db, session
from example_data import ExampleDataLoader

# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
# import logging
# logging.basicConfig()
# logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
import logging
logging.basicConfig()


class BaseTest(unittest.TestCase):

@@ -32,6 +32,10 @@ class BaseTest(unittest.TestCase):
    efficiently when we have a database in place.
    """

    if not app.config['TESTING']:
        raise (Exception("INVALID TEST CONFIGURATION. This is almost always an import order issue. "
                         "The first class to import in each test should be the base_test.py file."))

    auths = {}
    test_uid = "dhf8r"

@@ -83,7 +87,7 @@ class BaseTest(unittest.TestCase):
    @classmethod
    def tearDownClass(cls):
        cls.ctx.pop()
        session.remove()
        db.drop_all()
        pass

    def setUp(self):

@@ -97,12 +101,10 @@ class BaseTest(unittest.TestCase):
    def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
        if user is None:
            uid = self.test_uid
            user_info = {'uid': self.test_uid, 'first_name': 'Daniel', 'last_name': 'Funk',
                         'email_address': 'dhf8r@virginia.edu'}
            user_info = {'uid': self.test_uid}
        else:
            uid = user.uid
            user_info = {'uid': user.uid, 'first_name': user.first_name, 'last_name': user.last_name,
                         'email_address': user.email_address}
            user_info = {'uid': user.uid}

        query_string = self.user_info_to_query_string(user_info, redirect_url)
        rv = self.app.get("/v1.0/sso_backdoor%s" % query_string, follow_redirects=False)

@@ -113,10 +115,17 @@ class BaseTest(unittest.TestCase):
        self.assertIsNotNone(user_model.display_name)
        return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())

    def load_example_data(self):
    def load_example_data(self, use_crc_data=False):
        """use_crc_data will cause this to load the mammoth collection of documents
        we built up developing crc, otherwise it depends on a small setup for
        running tests."""

        from example_data import ExampleDataLoader
        ExampleDataLoader.clean_db()
        ExampleDataLoader().load_all()
        if(use_crc_data):
            ExampleDataLoader().load_all()
        else:
            ExampleDataLoader().load_test_data()

        for user_json in self.users:
            db.session.add(UserModel(**user_json))

@@ -125,6 +134,7 @@ class BaseTest(unittest.TestCase):
            study_model = StudyModel(**study_json)
            db.session.add(study_model)
            StudyService._add_all_workflow_specs_to_study(study_model)
            db.session.execute(Sequence(StudyModel.__tablename__ + '_id_seq'))
        db.session.commit()
        db.session.flush()

@@ -149,7 +159,7 @@ class BaseTest(unittest.TestCase):
    def load_test_spec(dir_name, master_spec=False, category_id=None):
        """Loads a spec into the database based on a directory in /tests/data"""
        if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
            return
            return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
        return ExampleDataLoader().create_spec(id=dir_name, name=dir_name, filepath=filepath, master_spec=master_spec,
                                               category_id=category_id)

@@ -187,7 +197,7 @@ class BaseTest(unittest.TestCase):
        for key, value in items:
            query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))

        query_string_list.append('redirect_url=%s' % redirect_url)
        query_string_list.append('redirect=%s' % redirect_url)

        return '?%s' % '&'.join(query_string_list)

@@ -203,10 +213,31 @@ class BaseTest(unittest.TestCase):
        content_type = CONTENT_TYPES[file_extension[1:]]
        file_service.update_file(file_model, data, content_type)

    def create_user(self, uid="dhf8r", email="daniel.h.funk@gmail.com", display_name="Hoopy Frood"):
        user = session.query(UserModel).filter(UserModel.uid == uid).first()
        if user is None:
            user = UserModel(uid=uid, email_address=email, display_name=display_name)
            db.session.add(user)
            db.session.commit()
        return user

    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer"):
        study = session.query(StudyModel).first()
        if study is None:
            user = self.create_user(uid=uid)
            study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
                               user_uid=user.uid)
            db.session.add(study)
            db.session.commit()
        return study

    def create_workflow(self, workflow_name, study=None, category_id=None):
        if study == None:
            study = session.query(StudyModel).first()
        spec = self.load_test_spec(workflow_name, category_id=category_id)
        db.session.flush()
        spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first()
        if spec is None:
            spec = self.load_test_spec(workflow_name, category_id=category_id)
        if study is None:
            study = self.create_study()
        workflow_model = StudyService._create_workflow_model(study, spec)
        return workflow_model

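The new create_user / create_study / create_workflow helpers give each test a minimal fixture on demand; notably, create_workflow now looks for an already-loaded spec first and only falls back to load_test_spec, so repeated calls stay idempotent across tests.
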
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_96a17d9" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_96a17d9" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="Process_93a29b3" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_0637d8i</bpmn:outgoing>

@@ -27,7 +27,7 @@
</bpmn:extensionElements>
<bpmn:incoming>SequenceFlow_1i7hk1a</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_11c35oq</bpmn:outgoing>
<bpmn:script>CompleteTemplate Letter.docx AncillaryDocument.CoCApplication</bpmn:script>
<bpmn:script>CompleteTemplate Letter.docx AD_CoCApp</bpmn:script>
</bpmn:scriptTask>
<bpmn:endEvent id="EndEvent_0evb22x">
<bpmn:incoming>SequenceFlow_11c35oq</bpmn:incoming>

@@ -36,30 +36,30 @@
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_93a29b3">
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_0637d8i_di" bpmnElement="SequenceFlow_0637d8i">
<di:waypoint x="215" y="117" />
<di:waypoint x="265" y="117" />
<bpmndi:BPMNEdge id="SequenceFlow_11c35oq_di" bpmnElement="SequenceFlow_11c35oq">
<di:waypoint x="565" y="117" />
<di:waypoint x="665" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="UserTask_02o51o8_di" bpmnElement="task_gather_information">
<dc:Bounds x="265" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_1i7hk1a_di" bpmnElement="SequenceFlow_1i7hk1a">
<di:waypoint x="365" y="117" />
<di:waypoint x="465" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_0637d8i_di" bpmnElement="SequenceFlow_0637d8i">
<di:waypoint x="215" y="117" />
<di:waypoint x="265" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="UserTask_02o51o8_di" bpmnElement="task_gather_information">
<dc:Bounds x="265" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_0xjh8x4_di" bpmnElement="task_generate_document">
<dc:Bounds x="465" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_0evb22x_di" bpmnElement="EndEvent_0evb22x">
<dc:Bounds x="665" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_11c35oq_di" bpmnElement="SequenceFlow_11c35oq">
<di:waypoint x="565" y="117" />
<di:waypoint x="665" y="117" />
</bpmndi:BPMNEdge>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1v1rp1q" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="empty_workflow" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_0lvudp8</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="SequenceFlow_0lvudp8" sourceRef="StartEvent_1" targetRef="EndEvent_0q4qzl9" />
<bpmn:endEvent id="EndEvent_0q4qzl9">
<bpmn:incoming>SequenceFlow_0lvudp8</bpmn:incoming>
</bpmn:endEvent>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="empty_workflow">
<bpmndi:BPMNEdge id="SequenceFlow_0lvudp8_di" bpmnElement="SequenceFlow_0lvudp8">
<di:waypoint x="215" y="117" />
<di:waypoint x="432" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_0q4qzl9_di" bpmnElement="EndEvent_0q4qzl9">
<dc:Bounds x="432" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -0,0 +1,113 @@
import json
from tests.base_test import BaseTest

from crc import app, db, session
from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus


APPROVAL_PAYLOAD = {
    'id': None,
    'approver': {
        'uid': 'bgb22',
        'display_name': 'Billy Bob (bgb22)',
        'title': 'E42:He\'s a hoopy frood',
        'department': 'E0:EN-Eng Study of Parallel Universes'
    },
    'title': 'El Study',
    'status': 'DECLINED',
    'version': 1,
    'message': 'Incorrect documents',
    'associated_files': [
        {
            'id': 42,
            'name': 'File 1',
            'content_type': 'document'
        },
        {
            'id': 43,
            'name': 'File 2',
            'content_type': 'document'
        }
    ],
    'workflow_id': 1,
    'study_id': 1
}


class TestApprovals(BaseTest):
    def setUp(self):
        """Initial setup shared by all TestApprovals tests"""
        self.load_example_data()
        self.study = self.create_study()
        self.workflow = self.create_workflow('random_fact')
        # TODO: Move to base_test as a helper
        self.approval = ApprovalModel(
            study=self.study,
            workflow=self.workflow,
            approver_uid='arc93',
            status=ApprovalStatus.WAITING.value,
            version=1
        )
        session.add(self.approval)

        self.approval_2 = ApprovalModel(
            study=self.study,
            workflow=self.workflow,
            approver_uid='dhf8r',
            status=ApprovalStatus.WAITING.value,
            version=1
        )
        session.add(self.approval_2)

        session.commit()

    def test_list_approvals_per_approver(self):
        """Only approvals associated with approver should be returned"""
        approver_uid = self.approval_2.approver_uid
        rv = self.app.get(f'/v1.0/approval?approver_uid={approver_uid}', headers=self.logged_in_headers())
        self.assert_success(rv)

        response = json.loads(rv.get_data(as_text=True))

        # Stored approvals are 2
        approvals_count = ApprovalModel.query.count()
        self.assertEqual(approvals_count, 2)

        # but Dan's approvals should be only 1
        self.assertEqual(len(response), 1)

        # Confirm approver UID matches returned payload
        approval = ApprovalSchema().load(response[0])
        self.assertEqual(approval.approver['uid'], approver_uid)

    def test_list_approvals_per_admin(self):
        """All approvals will be returned"""
        rv = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
        self.assert_success(rv)

        response = json.loads(rv.get_data(as_text=True))

        # Returned approvals should match what's in the db
        approvals_count = ApprovalModel.query.count()
        response_count = len(response)
        self.assertEqual(approvals_count, response_count)

    def test_update_approval(self):
        """Approval status will be updated"""
        approval_id = self.approval.id
        data = dict(APPROVAL_PAYLOAD)
        data['id'] = approval_id

        self.assertEqual(self.approval.status, ApprovalStatus.WAITING.value)

        rv = self.app.put(f'/v1.0/approval/{approval_id}',
                          content_type="application/json",
                          headers=self.logged_in_headers(),
                          data=json.dumps(data))
        self.assert_success(rv)

        session.refresh(self.approval)

        # Updated record should now have the data sent to the endpoint
        self.assertEqual(self.approval.message, data['message'])
        self.assertEqual(self.approval.status, ApprovalStatus.DECLINED.value)

@@ -0,0 +1,59 @@
from tests.base_test import BaseTest
from crc import db
from crc.models.approval import ApprovalModel
from crc.services.approval_service import ApprovalService
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor


class TestApprovalsService(BaseTest):

    def test_create_approval_record(self):
        self.create_reference_document()
        workflow = self.create_workflow("empty_workflow")
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")

        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
        self.assertEquals(1, db.session.query(ApprovalModel).count())
        model = db.session.query(ApprovalModel).first()
        self.assertEquals(workflow.study_id, model.study_id)
        self.assertEquals(workflow.id, model.workflow_id)
        self.assertEquals("dhf8r", model.approver_uid)
        self.assertEquals(1, model.version)

    def test_new_requests_dont_add_if_approval_exists_for_current_workflow(self):
        self.create_reference_document()
        workflow = self.create_workflow("empty_workflow")
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")

        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
        self.assertEquals(1, db.session.query(ApprovalModel).count())
        model = db.session.query(ApprovalModel).first()
        self.assertEquals(1, model.version)

    def test_new_approval_requests_after_file_modification_create_new_requests(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code="AD_CoCAppr")

        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")

        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")

        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
        self.assertEquals(2, db.session.query(ApprovalModel).count())
        models = db.session.query(ApprovalModel).order_by(ApprovalModel.version).all()
        self.assertEquals(1, models[0].version)
        self.assertEquals(2, models[1].version)

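Read together, these service tests pin down the intended semantics of add_approval: repeated requests for an unchanged workflow are idempotent, while a change to the underlying files yields a new approval with an incremented version.
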
@@ -1,6 +1,7 @@
from tests.base_test import BaseTest

from crc import db
from crc.models.user import UserModel
from tests.base_test import BaseTest


class TestAuthentication(BaseTest):

|
|||
self.assertTrue(isinstance(auth_token, bytes))
|
||||
self.assertEqual("dhf8r", user.decode_auth_token(auth_token).get("sub"))
|
||||
|
||||
def test_auth_creates_user(self):
|
||||
new_uid = 'czn1z';
|
||||
def test_backdoor_auth_creates_user(self):
|
||||
new_uid = 'lb3dp' ## Assure this user id is in the fake responses from ldap.
|
||||
self.load_example_data()
|
||||
user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
|
||||
self.assertIsNone(user)
|
||||
|
@@ -37,6 +38,23 @@
        self.assertTrue(rv_2.status_code == 302)
        self.assertTrue(str.startswith(rv_2.location, redirect_url))

    def test_normal_auth_creates_user(self):
        new_uid = 'lb3dp'  # This user is in the test ldap system.
        self.load_example_data()
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
        self.assertIsNone(user)
        redirect_url = 'http://worlds.best.website/admin'
        headers = dict(Uid=new_uid)
        rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers)
        self.assert_success(rv)
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
        self.assertIsNotNone(user)
        self.assertEquals(new_uid, user.uid)
        self.assertEquals("Laura Barnes", user.display_name)
        self.assertEquals("lb3dp@virginia.edu", user.email_address)
        self.assertEquals("E0:Associate Professor of Systems and Information Engineering", user.title)


    def test_current_user_status(self):
        self.load_example_data()
        rv = self.app.get('/v1.0/user')

@@ -45,6 +63,7 @@
        rv = self.app.get('/v1.0/user', headers=self.logged_in_headers())
        self.assert_success(rv)

        user = UserModel(uid="ajl2j", first_name='Aaron', last_name='Louie', email_address='ajl2j@virginia.edu')
        # User must exist in the mock ldap responses.
        user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu')
        rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut'))
        self.assert_success(rv)

@@ -0,0 +1,73 @@
from tests.base_test import BaseTest
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor


class TestFileService(BaseTest):
    """Largely tested via the test_file_api, and time is tight, but adding new tests here."""

    def test_add_file_from_task_increments_version_and_replaces_on_subsequent_add(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('file_upload_form')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234', irb_doc_code=irb_code)
        # Add the file again with different data
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code=irb_code)

        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
        self.assertEquals(1, len(file_models))

        file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
        self.assertEquals(1, len(file_data))
        self.assertEquals(2, file_data[0].version)


    def test_add_file_from_form_increments_version_and_replaces_on_subsequent_add_with_same_name(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('file_upload_form')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      irb_doc_code=irb_code,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234')
        # Add the file again with different data
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      irb_doc_code=irb_code,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678')

        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
        self.assertEquals(1, len(file_models))

        file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
        self.assertEquals(1, len(file_data))
        self.assertEquals(2, file_data[0].version)

    def test_add_file_from_form_allows_multiple_files_with_different_names(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('file_upload_form')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      irb_doc_code=irb_code,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234')
        # Add the file again with different data
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      irb_doc_code=irb_code,
                                      name="a_different_thing.png", content_type="text",
                                      binary_data=b'5678')
        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
        self.assertEquals(2, len(file_models))

@@ -1,15 +1,14 @@
import io
import json
from datetime import datetime
from unittest.mock import patch

from tests.base_test import BaseTest

from crc import session
from crc.models.file import FileModel, FileType, FileModelSchema, FileDataModel
from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from example_data import ExampleDataLoader
from tests.base_test import BaseTest


class TestFilesApi(BaseTest):

@@ -20,7 +19,7 @@ class TestFilesApi(BaseTest):
        return (minimal_dbpm % content).encode()

    def test_list_files_for_workflow_spec(self):
        self.load_example_data()
        self.load_example_data(use_crc_data=True)
        spec_id = 'core_info'
        spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
        rv = self.app.get('/v1.0/file?workflow_spec_id=%s' % spec_id,

@@ -166,17 +165,16 @@ class TestFilesApi(BaseTest):
                          content_type='multipart/form-data', headers=self.logged_in_headers())
        self.assert_success(rv)
        self.assertIsNotNone(rv.get_data())
        json_data = json.loads(rv.get_data(as_text=True))
        file = FileModelSchema().load(json_data, session=session)
        self.assertEqual(2, file.latest_version)
        self.assertEqual(FileType.bpmn, file.type)
        self.assertEqual("application/octet-stream", file.content_type)
        file_json = json.loads(rv.get_data(as_text=True))
        self.assertEqual(2, file_json['latest_version'])
        self.assertEqual(FileType.bpmn.value, file_json['type'])
        self.assertEqual("application/octet-stream", file_json['content_type'])
        self.assertEqual(spec.id, file.workflow_spec_id)

        # Assure it is updated in the database and properly persisted.
        file_model = session.query(FileModel).filter(FileModel.id == file.id).first()
        self.assertEqual(2, file_model.latest_version)

        file_data = FileService.get_file_data(file_model.id)
        self.assertEqual(2, file_data.version)

        rv = self.app.get('/v1.0/file/%i/data' % file.id, headers=self.logged_in_headers())
        self.assert_success(rv)

@@ -193,16 +191,13 @@ class TestFilesApi(BaseTest):
                          content_type='multipart/form-data', headers=self.logged_in_headers())
        self.assertIsNotNone(rv.get_data())
        json_data = json.loads(rv.get_data(as_text=True))
        file = FileModelSchema().load(json_data, session=session)
        self.assertEqual(1, file.latest_version)
        self.assertEqual(1, json_data['latest_version'])
        data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
        rv = self.app.put('/v1.0/file/%i/data' % file.id, data=data, follow_redirects=True,
        rv = self.app.put('/v1.0/file/%i/data' % json_data['id'], data=data, follow_redirects=True,
                          content_type='multipart/form-data', headers=self.logged_in_headers())
        self.assertIsNotNone(rv.get_data())
        json_data = json.loads(rv.get_data(as_text=True))
        file = FileModelSchema().load(json_data, session=session)
        self.assertEqual(1, file.latest_version)

        self.assertEqual(1, json_data['latest_version'])

    def test_get_file(self):
        self.load_example_data()

@@ -21,7 +21,7 @@ class TestLdapService(BaseTest):
        self.assertEqual("lb3dp", user_info.uid)
        self.assertEqual("Laura Barnes", user_info.display_name)
        self.assertEqual("Laura", user_info.given_name)
        self.assertEqual("lb3dp@virginia.edu", user_info.email)
        self.assertEqual("lb3dp@virginia.edu", user_info.email_address)
        self.assertEqual("+1 (434) 924-1723", user_info.telephone_number)
        self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user_info.title)
        self.assertEqual("E0:EN-Eng Sys and Environment", user_info.department)

@@ -1,79 +1,128 @@
from crc import session
from crc.models.file import FileDataModel, FileModel, LookupFileModel, LookupDataModel
import os

from tests.base_test import BaseTest

from crc.services.file_service import FileService
from crc.api.common import ApiError
from crc import session, app
from crc.models.file import FileDataModel, FileModel, LookupFileModel, LookupDataModel, CONTENT_TYPES
from crc.services.lookup_service import LookupService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
from tests.base_test import BaseTest


class TestLookupService(BaseTest):

    def test_create_lookup_file_multiple_times_does_not_update_database(self):
        spec = self.load_test_spec('enum_options_from_file')
    def test_lookup_returns_good_error_on_bad_field(self):
        spec = BaseTest.load_test_spec('enum_options_with_search')
        workflow = self.create_workflow('enum_options_with_search')
        file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
        file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
        LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
        LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
        LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
        with self.assertRaises(ApiError):
            LookupService.lookup(workflow, "not_the_right_field", "sam", limit=10)

    def test_lookup_table_is_not_created_more_than_once(self):
        spec = BaseTest.load_test_spec('enum_options_with_search')
        workflow = self.create_workflow('enum_options_with_search')
        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        LookupService.lookup(workflow, "sponsor", "something", limit=10)
        LookupService.lookup(workflow, "sponsor", "blah", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        self.assertIsNotNone(lookup_records)
        self.assertEqual(1, len(lookup_records))
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEquals(19, len(lookup_data))
        # Using the same table with a different lookup label or value does create additional records.
        LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NAME", "CUSTOMER_NUMBER")
        self.assertEquals(28, len(lookup_data))

    def test_updates_to_file_cause_lookup_rebuild(self):
        spec = BaseTest.load_test_spec('enum_options_with_search')
        workflow = self.create_workflow('enum_options_with_search')
        file_model = session.query(FileModel).filter(FileModel.name == "sponsors.xls").first()
        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        self.assertIsNotNone(lookup_records)
        self.assertEqual(2, len(lookup_records))
        FileService.delete_file(file_model.id)  # Assure we can delete the file.
        self.assertEqual(1, len(lookup_records))
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEquals(28, len(lookup_data))

        # Update the workflow specification file.
        file_path = os.path.join(app.root_path, '..', 'tests', 'data',
                                 'enum_options_with_search', 'sponsors_modified.xls')
        file = open(file_path, 'rb')
        FileService.update_file(file_model, file.read(), CONTENT_TYPES['xls'])
        file.close()

        # restart the workflow, so it can pick up the changes.
        WorkflowProcessor(workflow, soft_reset=True)

        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEquals(4, len(lookup_data))

    def test_some_full_text_queries(self):
        self.load_test_spec('enum_options_from_file')
        file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
        self.assertIsNotNone(file_model)
        file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
        lookup_table = LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
        spec = BaseTest.load_test_spec('enum_options_from_file')
        workflow = self.create_workflow('enum_options_from_file')
        processor = WorkflowProcessor(workflow)
        processor.do_engine_steps()

        results = LookupService._run_lookup_query(lookup_table, "medicines", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "", limit=10)
        self.assertEquals(10, len(results), "Blank queries return everything, to the limit")

        results = LookupService.lookup(workflow, "AllTheNames", "medicines", limit=10)
        self.assertEquals(1, len(results), "words in the middle of label are detected.")
        self.assertEquals("The Medicines Company", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "", limit=10)
        self.assertEquals(10, len(results), "Blank queries return everything, to the limit")

        results = LookupService._run_lookup_query(lookup_table, "UVA", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "UVA", limit=10)
        self.assertEquals(1, len(results), "Beginning of label is found.")
        self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "uva", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "uva", limit=10)
        self.assertEquals(1, len(results), "case does not matter.")
        self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "medici", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "medici", limit=10)
        self.assertEquals(1, len(results), "partial words are picked up.")
        self.assertEquals("The Medicines Company", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "Genetics Savings", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "Genetics Savings", limit=10)
        self.assertEquals(1, len(results), "multiple terms are picked up.")
        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "Genetics Sav", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "Genetics Sav", limit=10)
        self.assertEquals(1, len(results), "prefix queries still work with partial terms")
        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "Gen Sav", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "Gen Sav", limit=10)
        self.assertEquals(1, len(results), "prefix queries still work with ALL the partial terms")
        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)

        results = LookupService._run_lookup_query(lookup_table, "Inc", limit=10)
        results = LookupService.lookup(workflow, "AllTheNames", "Inc", limit=10)
        self.assertEquals(7, len(results), "short terms get multiple correct results.")
        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)

        results = LookupService.lookup(workflow, "AllTheNames", "reaction design", limit=10)
        self.assertEquals(5, len(results), "all results come back for two terms.")
        self.assertEquals("Reaction Design", results[0].label, "Exact matches come first.")

        results = LookupService.lookup(workflow, "AllTheNames", "1 Something", limit=10)
        self.assertEquals("1 Something", results[0].label, "Exact matches are preferred")

        results = LookupService.lookup(workflow, "AllTheNames", "1 (!-Something", limit=10)
        self.assertEquals("1 Something", results[0].label, "special characters don't flake out")

        # 1018 10000 Something Industry
        # 1019 1000 Something Industry
        # 1020 1 Something Industry
        # 1021 10 Something Industry
        # 1022 10000 Something Industry

        # Fixme: Stop words are taken into account on the query side, and haven't found a fix yet.
|
||||
#results = WorkflowService.run_lookup_query(lookup_table.id, "in", limit=10)
|
||||
#self.assertEquals(7, len(results), "stop words are not removed.")
|
||||
#self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
|
||||
|
||||
|
|
|
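The Fixme above stems from Postgres full-text search: the tsquery parser silently drops stop words such as "in", so a query made up entirely of stop words matches nothing. Below is a minimal sketch of one possible workaround that degrades to a substring scan; the helper name is hypothetical, it assumes the lookup models are queried directly, and it is not the project's actual fix (the Fixme says none was found).

    from crc import session
    from crc.models.file import LookupDataModel

    def lookup_with_stopword_fallback(lookup_file_model, query, limit=10):
        # Build a prefix tsquery such as "Gen:* & Sav:*" from the query terms.
        terms = [t for t in query.split() if t.isalnum()]
        ts_query = ' & '.join('%s:*' % t for t in terms)
        results = []
        if ts_query:
            results = session.query(LookupDataModel) \
                .filter(LookupDataModel.lookup_file_model == lookup_file_model) \
                .filter(LookupDataModel.label.match(ts_query)) \
                .limit(limit).all()
        if not results:
            # A stop-word-only query ("in") parses to an empty tsquery and
            # matches nothing, so degrade to a case-insensitive substring scan.
            results = session.query(LookupDataModel) \
                .filter(LookupDataModel.lookup_file_model == lookup_file_model) \
                .filter(LookupDataModel.label.ilike('%' + query + '%')) \
                .limit(limit).all()
        return results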
@@ -1,7 +1,8 @@
from unittest.mock import patch

from crc.services.protocol_builder import ProtocolBuilderService
from crc import app
from tests.base_test import BaseTest
from crc.services.protocol_builder import ProtocolBuilderService


class TestProtocolBuilder(BaseTest):
@@ -10,6 +11,7 @@ class TestProtocolBuilder(BaseTest):

    @patch('crc.services.protocol_builder.requests.get')
    def test_get_studies(self, mock_get):
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('user_studies.json')
        response = ProtocolBuilderService.get_studies(self.test_uid)
@@ -17,6 +19,7 @@ class TestProtocolBuilder(BaseTest):

    @patch('crc.services.protocol_builder.requests.get')
    def test_get_investigators(self, mock_get):
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('investigators.json')
        response = ProtocolBuilderService.get_investigators(self.test_study_id)
@@ -28,6 +31,7 @@ class TestProtocolBuilder(BaseTest):

    @patch('crc.services.protocol_builder.requests.get')
    def test_get_required_docs(self, mock_get):
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('required_docs.json')
        response = ProtocolBuilderService.get_required_docs(self.test_study_id)
@@ -37,6 +41,7 @@ class TestProtocolBuilder(BaseTest):

    @patch('crc.services.protocol_builder.requests.get')
    def test_get_details(self, mock_get):
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('study_details.json')
        response = ProtocolBuilderService.get_study_details(self.test_study_id)
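Each of these tests uses the same mocking pattern: @patch swaps out requests.get inside the protocol_builder module, so the service parses a canned fixture instead of calling the live Protocol Builder. A standalone sketch of the pattern; the inline JSON stands in for a fixture file, and its field names are illustrative only.

    import json
    from unittest.mock import patch

    from crc import app
    from crc.services.protocol_builder import ProtocolBuilderService

    app.config['PB_ENABLED'] = True  # the service is a no-op when disabled
    with patch('crc.services.protocol_builder.requests.get') as mock_get:
        mock_get.return_value.ok = True           # the service checks response.ok
        mock_get.return_value.text = json.dumps(  # ...and parses response.text
            [{"STUDYID": 54321, "TITLE": "A mocked study"}])  # hypothetical fields
        studies = ProtocolBuilderService.get_studies('dhf8r')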
@@ -0,0 +1,52 @@
from crc.services.file_service import FileService
from tests.base_test import BaseTest

from crc.scripts.request_approval import RequestApproval
from crc.services.workflow_processor import WorkflowProcessor
from crc.api.common import ApiError

from crc import db
from crc.models.approval import ApprovalModel


class TestRequestApprovalScript(BaseTest):

    def test_do_task(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        task.data = {"study": {"approval1": "dhf8r", 'approval2': 'lb3dp'}}
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      irb_doc_code="UVACompl_PRCAppr",
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234')
        script = RequestApproval()
        script.do_task(task, workflow.study_id, workflow.id, "study.approval1", "study.approval2")
        self.assertEquals(2, db.session.query(ApprovalModel).count())

    def test_do_task_with_incorrect_argument(self):
        """This script should raise an error if it can't figure out the approvers."""
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        task.data = {"approvals": {'dhf8r': ["invalid"], 'lb3dp': "invalid"}}
        script = RequestApproval()
        with self.assertRaises(ApiError):
            script.do_task(task, workflow.study_id, workflow.id, "approvals")

    def test_do_task_validate_only(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        task.data = {"study": {"approval1": "dhf8r", 'approval2': 'lb3dp'}}

        script = RequestApproval()
        script.do_task_validate_only(task, workflow.study_id, workflow.id, "study.approval1")
        self.assertEquals(0, db.session.query(ApprovalModel).count())
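The "study.approval1" arguments above are dotted paths that the script resolves against task.data to find each approver's uid. A rough sketch of that resolution with a hypothetical helper name; the real script's lookup (and its ApiError on failure) may differ in detail.

    def resolve_dotted_path(data, path):
        # Walk "study.approval1" down nested dicts; fail loudly when a
        # segment is missing, mirroring the error case tested above.
        value = data
        for part in path.split('.'):
            if not isinstance(value, dict) or part not in value:
                raise KeyError('cannot resolve %r' % path)
            value = value[part]
        return value

    task_data = {"study": {"approval1": "dhf8r", "approval2": "lb3dp"}}
    assert resolve_dotted_path(task_data, "study.approval1") == "dhf8r"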
@@ -1,27 +1,25 @@
import json
from tests.base_test import BaseTest
from datetime import datetime, timezone
from unittest.mock import patch

from crc import session
from crc import session, app
from crc.models.protocol_builder import ProtocolBuilderStatus, \
    ProtocolBuilderStudySchema
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, StudySchema
from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecCategoryModel
from tests.base_test import BaseTest
from crc.services.protocol_builder import ProtocolBuilderService


class TestStudyApi(BaseTest):

    TEST_STUDY = {
        "id": 12345,
        "title": "Phase III Trial of Genuine People Personalities (GPP) Autonomous Intelligent Emotional Agents "
                 "for Interstellar Spacecraft",
        "last_updated": datetime.now(tz=timezone.utc),
        "protocol_builder_status": ProtocolBuilderStatus.ACTIVE,
        "primary_investigator_id": "tricia.marie.mcmillan@heartofgold.edu",
        "sponsor": "Sirius Cybernetics Corporation",
        "ind_number": "567890",
        "primary_investigator_id": "tmm2x",
        "user_uid": "dhf8r",
    }

@@ -38,35 +36,15 @@ class TestStudyApi(BaseTest):
        study = session.query(StudyModel).first()
        self.assertIsNotNone(study)

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_studies
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies
    def test_get_study(self, mock_studies, mock_details, mock_docs, mock_investigators):
    def test_get_study(self):
        """Generic test, but pretty detailed, in that the study should return a categorized list of workflows.
        This starts without loading the example data, to show that all the bases are covered from ground 0."""

        # Mock Protocol Builder responses
        studies_response = self.protocol_builder_response('user_studies.json')
        mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)
        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

        """NOTE: The protocol builder is not enabled or mocked out, as the master workflow (which is empty)
        and the test workflow do not need it, and it is disabled in the configuration."""
        self.load_example_data()
        new_study = self.add_test_study()
        new_study = session.query(StudyModel).filter_by(id=new_study["id"]).first()
        # Add a category
        new_category = WorkflowSpecCategoryModel(id=21, name="test_cat", display_name="Test Category", display_order=0)
        session.add(new_category)
        session.commit()
        # Create a workflow specification
        self.create_workflow("random_fact", study=new_study, category_id=new_category.id)
        # Assure there is a master specification, and it has the lookup files it needs.
        spec = self.load_test_spec("top_level_workflow", master_spec=True)
        self.create_reference_document()

        api_response = self.app.get('/v1.0/study/%i' % new_study.id,
                                    headers=self.logged_in_headers(), content_type="application/json")

@@ -75,13 +53,12 @@ class TestStudyApi(BaseTest):

        self.assertEqual(study.title, self.TEST_STUDY['title'])
        self.assertEqual(study.primary_investigator_id, self.TEST_STUDY['primary_investigator_id'])
        self.assertEqual(study.sponsor, self.TEST_STUDY['sponsor'])
        self.assertEqual(study.ind_number, self.TEST_STUDY['ind_number'])
        self.assertEqual(study.user_uid, self.TEST_STUDY['user_uid'])

        # Categories are read only, so switching to sub-scripting here.
        category = [c for c in study.categories if c['name'] == "test_cat"][0]
        self.assertEqual("test_cat", category['name'])
        # This assumes there is one test category set up in the example data.
        category = study.categories[0]
        self.assertEqual("test_category", category['name'])
        self.assertEqual("Test Category", category['display_name'])
        self.assertEqual(1, len(category["workflows"]))
        workflow = category["workflows"][0]

@@ -94,7 +71,7 @@ class TestStudyApi(BaseTest):
    def test_add_study(self):
        self.load_example_data()
        study = self.add_test_study()
        db_study = session.query(StudyModel).filter_by(id=12345).first()
        db_study = session.query(StudyModel).filter_by(id=study['id']).first()
        self.assertIsNotNone(db_study)
        self.assertEqual(study["title"], db_study.title)
        self.assertEqual(study["primary_investigator_id"], db_study.primary_investigator_id)

@@ -103,7 +80,7 @@ class TestStudyApi(BaseTest):
        self.assertEqual(study["user_uid"], db_study.user_uid)

        workflow_spec_count = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.is_master_spec == False).count()
        workflow_count = session.query(WorkflowModel).filter(WorkflowModel.study_id == 12345).count()
        workflow_count = session.query(WorkflowModel).filter(WorkflowModel.study_id == study['id']).count()
        error_count = len(study["errors"])
        self.assertEqual(workflow_spec_count, workflow_count + error_count)


@@ -126,6 +103,9 @@ class TestStudyApi(BaseTest):
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies
    def test_get_all_studies(self, mock_studies, mock_details, mock_docs, mock_investigators):
        # Enable the protocol builder for these tests, as the master_workflow and other workflows
        # depend on using the PB for data.
        app.config['PB_ENABLED'] = True
        self.load_example_data()
        s = StudyModel(
            id=54321,  # This matches one of the ids from the study_details_json data.

@@ -208,6 +188,7 @@ class TestStudyApi(BaseTest):
        self.assertEqual(study.sponsor, json_data['sponsor'])
        self.assertEqual(study.ind_number, json_data['ind_number'])


    def test_delete_study(self):
        self.load_example_data()
        study = session.query(StudyModel).first()

@@ -216,20 +197,13 @@ class TestStudyApi(BaseTest):

    def test_delete_study_with_workflow_and_status(self):
        self.load_example_data()
        study = session.query(StudyModel).first()
        new_category = WorkflowSpecCategoryModel(id=21, name="test_cat", display_name="Test Category", display_order=0)
        session.add(new_category)
        session.commit()
        # Create a workflow specification, and complete some stuff that would log stats
        workflow = self.create_workflow("random_fact", study=study, category_id=new_category.id)
        session.add(workflow)
        session.commit()
        stats2 = TaskEventModel(study_id=study.id, workflow_id=workflow.id, user_uid=self.users[0]['uid'])
        workflow = session.query(WorkflowModel).first()
        stats2 = TaskEventModel(study_id=workflow.study_id, workflow_id=workflow.id, user_uid=self.users[0]['uid'])
        session.add(stats2)
        session.commit()
        rv = self.app.delete('/v1.0/study/%i' % study.id, headers=self.logged_in_headers())
        rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
        self.assert_success(rv)
        del_study = session.query(StudyModel).filter(StudyModel.id == study.id).first()
        del_study = session.query(StudyModel).filter(StudyModel.id == workflow.study_id).first()
        self.assertIsNone(del_study)
@@ -1,4 +1,5 @@
import json
from tests.base_test import BaseTest
from unittest.mock import patch

from crc import db, session

@@ -10,7 +11,6 @@ from crc.scripts.study_info import StudyInfo
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from tests.base_test import BaseTest


class TestStudyDetailsDocumentsScript(BaseTest):
@@ -63,7 +63,7 @@ class TestStudyDetailsDocumentsScript(BaseTest):
        workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
        processor = WorkflowProcessor(workflow_model)
        task = processor.next_task()
        StudyInfo().do_task_validate_only(task, study.id, "documents")
        StudyInfo().do_task_validate_only(task, study.id, workflow_model.id, "documents")

    def test_load_lookup_data(self):
        self.create_reference_document()
@@ -2,7 +2,9 @@ import json
from datetime import datetime
from unittest.mock import patch

from crc import db
from tests.base_test import BaseTest

from crc import db, app
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudyModel
from crc.models.user import UserModel

@@ -12,7 +14,6 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from example_data import ExampleDataLoader
from tests.base_test import BaseTest


class TestStudyService(BaseTest):

@@ -57,7 +58,7 @@ class TestStudyService(BaseTest):
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    def test_total_tasks_updated(self, mock_docs):
        """Assure that a user's progress is available when getting a list of studies for that user."""

        app.config['PB_ENABLED'] = True
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)

@@ -73,7 +74,6 @@ class TestStudyService(BaseTest):

        # The workflow should not be started, and it should have 0 completed tasks and 0 total tasks.
        self.assertEqual(WorkflowStatus.not_started, workflow.status)
        self.assertEqual(None, workflow.spec_version)
        self.assertEqual(0, workflow.total_tasks)
        self.assertEqual(0, workflow.completed_tasks)

@@ -106,7 +106,7 @@ class TestStudyService(BaseTest):

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    def test_get_required_docs(self, mock_docs):

        app.config['PB_ENABLED'] = True
        # Mock out the protocol builder.
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)

@@ -143,11 +143,9 @@ class TestStudyService(BaseTest):
        # Add a document to the study with the correct code.
        workflow = self.create_workflow('docx')
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_task_file(study_id=workflow.study_id, workflow_id=workflow.id,
                                  workflow_spec_id=workflow.workflow_spec_id,
                                  task_id="fakingthisout",
                                  name="anything.png", content_type="text",
                                  binary_data=b'1234', irb_doc_code=irb_code)
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234', irb_doc_code=irb_code)

        docs = StudyService().get_documents_status(workflow.study_id)
        self.assertIsNotNone(docs)

@@ -156,13 +154,31 @@ class TestStudyService(BaseTest):
        self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0])
        self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0]['file_id'])
        self.assertEquals(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])
        self.assertEquals(workflow.workflow_spec_id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_spec_id'])

        # 'file_id': 123,
        # 'task_id': 'abcdef14236890',
        # 'workflow_id': 456,
        # 'workflow_spec_id': 'irb_api_details',
        # 'status': 'complete',
    def test_get_all_studies(self):
        user = self.create_user_with_study_and_workflow()

        # Add a document to the study with the correct code.
        workflow1 = self.create_workflow('docx')
        workflow2 = self.create_workflow('empty_workflow')

        # Add files to both workflows.
        FileService.add_workflow_file(workflow_id=workflow1.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr")
        FileService.add_workflow_file(workflow_id=workflow1.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234', irb_doc_code="AD_Consent_Model")
        FileService.add_workflow_file(workflow_id=workflow2.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr")

        studies = StudyService().get_all_studies_with_files()
        self.assertEquals(1, len(studies))
        self.assertEquals(3, len(studies[0].files))


    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_docs
    def test_get_personnel(self, mock_docs):
@@ -3,13 +3,15 @@ import os
import random
from unittest.mock import patch

from tests.base_test import BaseTest

from crc import session, app
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
from crc.models.file import FileModelSchema
from crc.models.stats import TaskEventModel
from crc.models.workflow import WorkflowStatus
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_service import WorkflowService
from tests.base_test import BaseTest


class TestTasksApi(BaseTest):

@@ -180,7 +182,6 @@ class TestTasksApi(BaseTest):
        self.assertEquals("Task 2b", nav[5]['title'])
        self.assertEquals("Task 3", nav[6]['title'])


    def test_document_added_to_workflow_shows_up_in_file_list(self):
        self.load_example_data()
        self.create_reference_document()

@@ -302,11 +303,16 @@ class TestTasksApi(BaseTest):

    @patch('crc.services.protocol_builder.requests.get')
    def test_multi_instance_task(self, mock_get):

        self.load_example_data()

        # Enable the protocol builder.
        app.config['PB_ENABLED'] = True

        # This depends on getting a list of investigators back from the protocol builder.
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('investigators.json')

        self.load_example_data()
        workflow = self.create_workflow('multi_instance')

        # Get the first form in the two-form workflow.

@@ -328,8 +334,8 @@ class TestTasksApi(BaseTest):
        workflow = self.get_workflow_api(workflow)
        task = workflow.next_task
        field_id = task.form['fields'][0]['id']
        rv = self.app.get('/v1.0/workflow/%i/task/%s/lookup/%s?query=%s&limit=5' %
                          (workflow.id, task.id, field_id, 'c'),  # All records with a word that starts with 'c'
        rv = self.app.get('/v1.0/workflow/%i/lookup/%s?query=%s&limit=5' %
                          (workflow.id, field_id, 'c'),  # All records with a word that starts with 'c'
                          headers=self.logged_in_headers(),
                          content_type="application/json")
        self.assert_success(rv)

@@ -344,8 +350,8 @@ class TestTasksApi(BaseTest):
        task = workflow.next_task
        field_id = task.form['fields'][0]['id']
        # lb3dp is a user record in the mock ldap responses for tests.
        rv = self.app.get('/v1.0/workflow/%i/task/%s/lookup/%s?query=%s&limit=5' %
                          (workflow.id, task.id, field_id, 'lb3dp'),
        rv = self.app.get('/v1.0/workflow/%s/lookup/%s?query=%s&limit=5' %
                          (workflow.id, field_id, 'lb3dp'),
                          headers=self.logged_in_headers(),
                          content_type="application/json")
        self.assert_success(rv)

@@ -419,11 +425,12 @@ class TestTasksApi(BaseTest):
    def test_parallel_multi_instance(self, mock_get):

        # Assure we get nine investigators back from the API call, as set in the investigators.json file.
        app.config['PB_ENABLED'] = True
        mock_get.return_value.ok = True
        mock_get.return_value.text = self.protocol_builder_response('investigators.json')

        self.load_example_data()

        workflow = self.create_workflow('multi_instance_parallel')

        workflow_api = self.get_workflow_api(workflow)
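Both lookup hunks above reflect the same API change: the typeahead endpoint no longer carries a task id in the route, only the workflow and the form field. A hypothetical client call against the new route; the base URL is an assumption and authentication is omitted for brevity.

    import requests

    # GET /v1.0/workflow/<workflow_id>/lookup/<field_id>?query=...&limit=...
    resp = requests.get('http://localhost:5000/v1.0/workflow/42/lookup/sponsor',
                        params={'query': 'c', 'limit': 5})
    resp.raise_for_status()
    options = resp.json()  # matching option records for the field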
@@ -1,8 +1,8 @@
import json
import os

from crc import app
from tests.base_test import BaseTest
from crc import app


class TestStudyApi(BaseTest):

@@ -22,11 +22,13 @@ class TestStudyApi(BaseTest):
            {"option": "Address", "selected": False},
            {"option": "Phone", "selected": True, "stored": ["Send or Transmit outside of UVA"]}]}
        with open(filepath, 'rb') as f:
            file_data = {'file': (f, 'my_new_file.bpmn')}
            rv = self.app.put('/v1.0/render_docx?data=%s' % json.dumps(template_data),
            file_data = {'file': (f, 'my_new_file.bpmn'), 'data': json.dumps(template_data)}
            rv = self.app.put('/v1.0/render_docx',
                              data=file_data, follow_redirects=True,
                              content_type='multipart/form-data')
        self.assert_success(rv)
        self.assertIsNotNone(rv.data)
        self.assertEquals('application/octet-stream', rv.content_type)

    def test_list_scripts(self):
        rv = self.app.get('/v1.0/list_scripts')
@@ -0,0 +1,23 @@
from tests.base_test import BaseTest

from crc.scripts.update_study import UpdateStudy
from crc.services.workflow_processor import WorkflowProcessor


class TestUpdateStudyScript(BaseTest):

    def test_do_task(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        task.data = {"details": {
            "label": "My New Title",
            "value": "dhf8r"}
        }

        script = UpdateStudy()
        script.do_task(task, workflow.study_id, workflow.id, "title:details.label", "pi:details.value")
        self.assertEquals("My New Title", workflow.study.title)
        self.assertEquals("dhf8r", workflow.study.primary_investigator_id)
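UpdateStudy's arguments pair a study attribute with a dotted source path: "title:details.label" means "set the study's title from task.data['details']['label']". A sketch of parsing that argument form; the helper is illustrative, not the script's actual code.

    def parse_update_args(task_data, *args):
        # Split each "field:dotted.path" pair and resolve the path
        # against the task data.
        updates = {}
        for arg in args:
            field, path = arg.split(':', 1)
            value = task_data
            for part in path.split('.'):
                value = value[part]
            updates[field] = value
        return updates

    data = {"details": {"label": "My New Title", "value": "dhf8r"}}
    assert parse_update_args(data, "title:details.label", "pi:details.value") == \
        {"title": "My New Title", "pi": "dhf8r"}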
@@ -1,34 +1,31 @@
import json
import logging
import os
import json
import string
import random
from unittest.mock import patch

from SpiffWorkflow import Task as SpiffTask
from tests.base_test import BaseTest

from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
from SpiffWorkflow.camunda.specs.UserTask import Form, FormField
from SpiffWorkflow.specs import TaskSpec
from SpiffWorkflow.camunda.specs.UserTask import FormField

from crc import session, db, app
from crc.api.common import ApiError
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
from crc.models.file import FileModel, FileDataModel
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.services.protocol_builder import ProtocolBuilderService
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowSpecModel, WorkflowStatus, WorkflowModel
from crc.models.workflow import WorkflowSpecModel, WorkflowStatus
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.models.protocol_builder import ProtocolBuilderStudySchema, ProtocolBuilderInvestigatorSchema, \
    ProtocolBuilderRequiredDocumentSchema
from crc.services.workflow_service import WorkflowService
from tests.base_test import BaseTest
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService


class TestWorkflowProcessor(BaseTest):

    def _populate_form_with_random_data(self, task):
        api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
        WorkflowProcessor.populate_form_with_random_data(task, api_task)
        WorkflowService.populate_form_with_random_data(task, api_task)

    def get_processor(self, study_model, spec_model):
        workflow_model = StudyService._create_workflow_model(study_model, spec_model)

@@ -226,10 +223,10 @@ class TestWorkflowProcessor(BaseTest):
        self._populate_form_with_random_data(task)
        processor.complete_task(task)

        files = session.query(FileModel).filter_by(study_id=study.id, workflow_id=processor.get_workflow_id()).all()
        files = session.query(FileModel).filter_by(workflow_id=processor.get_workflow_id()).all()
        self.assertEqual(0, len(files))
        processor.do_engine_steps()
        files = session.query(FileModel).filter_by(study_id=study.id, workflow_id=processor.get_workflow_id()).all()
        files = session.query(FileModel).filter_by(workflow_id=processor.get_workflow_id()).all()
        self.assertEqual(1, len(files), "The task should create a new file.")
        file_data = session.query(FileDataModel).filter(FileDataModel.file_model_id == files[0].id).first()
        self.assertIsNotNone(file_data.data)

@@ -257,12 +254,12 @@ class TestWorkflowProcessor(BaseTest):
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("decision_table")
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_spec_version().startswith('v1.1'))
        self.assertTrue(processor.get_version_string().startswith('v1.1'))
        file_service = FileService()

        file_service.add_workflow_spec_file(workflow_spec_model, "new_file.txt", "txt", b'blahblah')
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_spec_version().startswith('v1.1.1'))
        self.assertTrue(processor.get_version_string().startswith('v1.1.1'))

        file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'docx', 'docx.bpmn')
        file = open(file_path, "rb")

@@ -271,7 +268,7 @@ class TestWorkflowProcessor(BaseTest):
        file_model = db.session.query(FileModel).filter(FileModel.name == "decision_table.bpmn").first()
        file_service.update_file(file_model, data, "txt")
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_spec_version().startswith('v2.1.1'))
        self.assertTrue(processor.get_version_string().startswith('v2.1.1'))

    def test_restart_workflow(self):
        self.load_example_data()

@@ -342,7 +339,7 @@ class TestWorkflowProcessor(BaseTest):
        # Assure that creating a new processor doesn't cause any issues, and maintains the spec version.
        processor.workflow_model.bpmn_workflow_json = processor.serialize()
        processor2 = WorkflowProcessor(processor.workflow_model)
        self.assertTrue(processor2.get_spec_version().startswith("v1 "))  # Still at version 1.
        self.assertFalse(processor2.is_latest_spec)  # Still at version 1.

        # Do a hard reset, which should bring us back to the beginning, but retain the data.
        processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True)

@@ -352,16 +349,12 @@ class TestWorkflowProcessor(BaseTest):
        self.assertEqual("New Step", processor3.next_task().task_spec.description)
        self.assertEqual("blue", processor3.next_task().data["color"])

    def test_get_latest_spec_version(self):
        workflow_spec_model = self.load_test_spec("two_forms")
        version = WorkflowProcessor.get_latest_version_string("two_forms")
        self.assertTrue(version.startswith("v1 "))

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')
    def test_master_bpmn(self, mock_details, mock_required_docs, mock_investigators, mock_studies):
    def test_master_bpmn_for_crc(self, mock_details, mock_required_docs, mock_investigators, mock_studies):

        # Mock Protocol Builder response
        studies_response = self.protocol_builder_response('user_studies.json')

@@ -376,7 +369,8 @@ class TestWorkflowProcessor(BaseTest):
        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)

        self.load_example_data()
        self.load_example_data(use_crc_data=True)
        app.config['PB_ENABLED'] = True

        study = session.query(StudyModel).first()
        workflow_spec_model = db.session.query(WorkflowSpecModel).\
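The renamed get_version_string assertions hint at how the version string is composed: one counter per spec file, primary BPMN file first, so adding a file appends a component and updating the primary file bumps the leading one. A sketch of that scheme as inferred from the assertions above; the project's actual composition may differ.

    def build_version_string(file_versions):
        # file_versions: per-file version numbers, primary BPMN file first.
        return 'v' + '.'.join(str(v) for v in file_versions)

    assert build_version_string([1, 1]) == 'v1.1'        # two files, both at v1
    assert build_version_string([1, 1, 1]) == 'v1.1.1'   # a third file was added
    assert build_version_string([2, 1, 1]) == 'v2.1.1'   # the primary file was updated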
@@ -1,10 +1,7 @@
from crc import session
from crc.models.file import FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from tests.base_test import BaseTest

from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
from tests.base_test import BaseTest


class TestWorkflowService(BaseTest):

@@ -31,7 +28,7 @@ class TestWorkflowService(BaseTest):
* bullet one
* bullet two has {{replace_me}}

# other stuff.
# other stuff.
"""
        expected = """
# Bigger Test
@@ -39,7 +36,7 @@ class TestWorkflowService(BaseTest):
* bullet one
* bullet two has new_thing

# other stuff.
# other stuff.
"""
        task.task_spec.documentation = documentation
        result = WorkflowService._process_documentation(task)

@@ -69,38 +66,16 @@ class TestWorkflowService(BaseTest):
        task = processor.next_task()
        WorkflowService.process_options(task, task.task_spec.form.fields[0])
        options = task.task_spec.form.fields[0].options
        self.assertEquals(19, len(options))
        self.assertEquals(28, len(options))
        self.assertEquals('1000', options[0]['id'])
        self.assertEquals("UVA - INTERNAL - GM USE ONLY", options[0]['name'])

    def test_create_lookup_file(self):
        spec = self.load_test_spec('enum_options_from_file')
        file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
        file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
        LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
        lookup_records = session.query(LookupFileModel).all()
        self.assertIsNotNone(lookup_records)
        self.assertEqual(1, len(lookup_records))
        lookup_record = lookup_records[0]
        self.assertIsNotNone(lookup_record)
        self.assertEquals("CUSTOMER_NUMBER", lookup_record.value_column)
        self.assertEquals("CUSTOMER_NAME", lookup_record.label_column)
        self.assertEquals("CUSTOMER_NAME", lookup_record.label_column)
        lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEquals(19, len(lookup_data))

        self.assertEquals("1000", lookup_data[0].value)
        self.assertEquals("UVA - INTERNAL - GM USE ONLY", lookup_data[0].label)
        # search_results = session.query(LookupDataModel).\
        #     filter(LookupDataModel.lookup_file_model_id == lookup_record.id).\
        #     filter(LookupDataModel.__ts_vector__.op('@@')(func.plainto_tsquery('INTERNAL'))).all()
        search_results = LookupDataModel.query.filter(LookupDataModel.label.match("INTERNAL")).all()
        self.assertEquals(1, len(search_results))
        search_results = LookupDataModel.query.filter(LookupDataModel.label.match("internal")).all()
        self.assertEquals(1, len(search_results))
        # This query finds results where a word starts with "bio".
        search_results = LookupDataModel.query.filter(LookupDataModel.label.match("bio:*")).all()
        self.assertEquals(2, len(search_results))


    def test_random_data_populate_form_on_auto_complete(self):
        self.load_example_data()
        workflow = self.create_workflow('enum_options_with_search')
        processor = WorkflowProcessor(workflow)
        processor.do_engine_steps()
        task = processor.next_task()
        task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
        WorkflowService.populate_form_with_random_data(task, task_api)
        self.assertTrue(isinstance(task.data["sponsor"], dict))
@@ -1,9 +1,9 @@
import json

from tests.base_test import BaseTest
from crc import session
from crc.models.file import FileModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel
from tests.base_test import BaseTest


class TestWorkflowSpec(BaseTest):
@@ -1,14 +1,14 @@
import json
import unittest
from unittest.mock import patch

from crc import session
from crc.api.common import ApiErrorSchema
from crc.models.file import FileModel
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel
from tests.base_test import BaseTest

from crc.services.protocol_builder import ProtocolBuilderService
from crc import session, app
from crc.api.common import ApiErrorSchema
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.models.workflow import WorkflowSpecModel


class TestWorkflowSpecValidation(BaseTest):

@@ -20,22 +20,8 @@ class TestWorkflowSpecValidation(BaseTest):
        json_data = json.loads(rv.get_data(as_text=True))
        return ApiErrorSchema(many=True).load(json_data)

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_studies
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies
    def test_successful_validation_of_test_workflows(self, mock_studies, mock_details, mock_docs, mock_investigators):

        # Mock Protocol Builder responses
        studies_response = self.protocol_builder_response('user_studies.json')
        mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)
        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

    def test_successful_validation_of_test_workflows(self):
        app.config['PB_ENABLED'] = False  # Assure this is disabled.
        self.assertEqual(0, len(self.validate_workflow("parallel_tasks")))
        self.assertEqual(0, len(self.validate_workflow("decision_table")))
        self.assertEqual(0, len(self.validate_workflow("docx")))

@@ -49,7 +35,7 @@ class TestWorkflowSpecValidation(BaseTest):
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies
    def test_successful_validation_of_auto_loaded_workflows(self, mock_studies, mock_details, mock_docs, mock_investigators):
    def test_successful_validation_of_crc_workflows(self, mock_studies, mock_details, mock_docs, mock_investigators):

        # Mock Protocol Builder responses
        studies_response = self.protocol_builder_response('user_studies.json')

@@ -61,7 +47,8 @@ class TestWorkflowSpecValidation(BaseTest):
        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

        self.load_example_data()
        self.load_example_data(use_crc_data=True)
        app.config['PB_ENABLED'] = True
        workflows = session.query(WorkflowSpecModel).all()
        errors = []
        for w in workflows:
@@ -0,0 +1,23 @@
from werkzeug.exceptions import NotFound
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.middleware.proxy_fix import ProxyFix

from crc import app

if __name__ == "__main__":
    def no_app(environ, start_response):
        return NotFound()(environ, start_response)

    # Remove any trailing slash, but ensure a leading slash.
    base_url = '/' + app.config['APPLICATION_ROOT'].strip('/')
    routes = {'/': app.wsgi_app}

    if base_url != '/':
        routes[base_url] = app.wsgi_app

    app.wsgi_app = DispatcherMiddleware(no_app, routes)
    app.wsgi_app = ProxyFix(app.wsgi_app)

    flask_port = app.config['FLASK_PORT']

    app.run(host='0.0.0.0', port=flask_port)
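The dispatcher above serves the app at '/' and, when APPLICATION_ROOT is set, at that prefix as well, while unmatched paths fall through to no_app and return 404. A self-contained sketch of the same routing checked with Werkzeug's test client; the '/api' prefix and the tiny hello app are stand-ins, not the project's configuration.

    from werkzeug.exceptions import NotFound
    from werkzeug.middleware.dispatcher import DispatcherMiddleware
    from werkzeug.test import Client
    from werkzeug.wrappers import Response

    def hello(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    def no_app(environ, start_response):
        return NotFound()(environ, start_response)

    wsgi = DispatcherMiddleware(no_app, {'/api': hello})
    client = Client(wsgi, Response)
    assert client.get('/api/anything').status_code == 200  # dispatched to the mount
    assert client.get('/elsewhere').status_code == 404     # falls through to no_app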