mirror of https://github.com/sartography/cr-connect-workflow.git
synced 2025-02-20 11:48:16 +00:00

Merge branch 'rrt/dev' into feature/emails-enhancement
commit e947f40ec7

12 .travis.yml
@@ -13,16 +13,16 @@ addons:
  organization: "sartography"

before_install:
  - cp config/travis-testing.py config/testing.py
  - psql -c 'create database crc_test;' -U postgres

install:
  - pip install pipenv pytest coverage awscli
  - export PATH=$PATH:$HOME/.local/bin;
  - pipenv install
  - pipenv install --dev

env:
  - PB_BASE_URL='http://workflow.sartography.com:5001/pb/'
  global:
    - TESTING=true
    - PB_ENABLED=false
    - SQLALCHEMY_DATABASE_URI="postgresql://postgres:@localhost:5432/crc_test"

script:
  - pipenv run coverage run -m pytest

@@ -33,7 +33,7 @@ after_success:

deploy:
  provider: script
  script: bash deploy.sh
  script: bash deploy.sh sartography/cr-connect-workflow
  skip_cleanup: true
  on:
    all_branches: true
@@ -1,21 +1,14 @@
FROM python:3.7-slim
FROM sartography/cr-connect-python-base

WORKDIR /app
COPY Pipfile Pipfile.lock /app/

RUN set -xe \
  && pip install pipenv \
  && apt-get update -q \
  && apt-get install -y -q \
       gcc python3-dev libssl-dev \
       curl postgresql-client git-core \
       gunicorn3 postgresql-client \
  && pipenv install --dev \
  && apt-get remove -y gcc python3-dev libssl-dev \
  && apt-get autoremove -y \
  && apt-get clean -y \
  && rm -rf /var/lib/apt/lists/* \
  && mkdir -p /app \
  && useradd _gunicorn --no-create-home --user-group

COPY . /app/
1 Pipfile

@@ -6,6 +6,7 @@ verify_ssl = true
[dev-packages]
pytest = "*"
pbr = "*"
coverage = "*"

[packages]
connexion = {extras = ["swagger-ui"],version = "*"}
37 Pipfile.lock (generated)

@@ -930,6 +930,43 @@
      ],
      "version": "==19.3.0"
    },
    "coverage": {
      "hashes": [
        "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a",
        "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355",
        "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65",
        "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7",
        "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9",
        "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1",
        "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0",
        "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55",
        "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c",
        "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6",
        "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef",
        "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019",
        "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e",
        "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0",
        "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf",
        "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24",
        "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2",
        "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c",
        "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4",
        "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0",
        "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd",
        "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04",
        "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e",
        "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730",
        "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2",
        "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768",
        "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796",
        "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7",
        "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a",
        "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489",
        "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"
      ],
      "index": "pypi",
      "version": "==5.1"
    },
    "importlib-metadata": {
      "hashes": [
        "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
@@ -9,9 +9,10 @@ JSON_SORT_KEYS = False  # CRITICAL. Do not sort the data when returning values
NAME = "CR Connect Workflow"
FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default="5000")
CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default="localhost:4200, localhost:5002"))
DEVELOPMENT = environ.get('DEVELOPMENT', default="true") == "true"
TESTING = environ.get('TESTING', default="false") == "true"
PRODUCTION = (environ.get('PRODUCTION', default="false") == "true") or (not DEVELOPMENT and not TESTING)
PRODUCTION = (environ.get('PRODUCTION', default="false") == "true")
TEST_UID = environ.get('TEST_UID', default="dhf8r")
ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah3us,cl3wf"))

# Sentry flag
ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true"

@@ -28,14 +29,14 @@ SQLALCHEMY_DATABASE_URI = environ.get(
    'SQLALCHEMY_DATABASE_URI',
    default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
)
TOKEN_AUTH_TTL_HOURS = int(environ.get('TOKEN_AUTH_TTL_HOURS', default=4))
TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")

# %s/%i placeholders expected for uva_id and study_id in various calls.
PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/pb/").strip('/') + '/'  # Trailing slash required
PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/v2.0/").strip('/') + '/'  # Trailing slash required
PB_USER_STUDIES_URL = environ.get('PB_USER_STUDIES_URL', default=PB_BASE_URL + "user_studies?uva_id=%s")
PB_INVESTIGATORS_URL = environ.get('PB_INVESTIGATORS_URL', default=PB_BASE_URL + "investigators?studyid=%i")
PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL + "required_docs?studyid=%i")

@@ -51,6 +52,6 @@ MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
MAIL_PORT = environ.get('MAIL_PORT', default=2525)
MAIL_USE_SSL = environ.get('MAIL_USE_SSL', default=False)
MAIL_USE_TLS = environ.get('MAIL_USE_TLS', default=True)
MAIL_USE_TLS = environ.get('MAIL_USE_TLS', default=False)
MAIL_USERNAME = environ.get('MAIL_USERNAME', default='')
MAIL_PASSWORD = environ.get('MAIL_PASSWORD', default='')
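The TOKEN_AUTH_TTL_HOURS switch from int to float matters because datetime.timedelta accepts fractional hours, so deployments can now set sub-hour token lifetimes. A minimal sketch of how the configured value drives expiry (variable names here are illustrative only):

    import datetime
    from os import environ

    # float() accepts values like "0.25" (15 minutes); int() would reject them
    ttl_hours = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
    expires_at = datetime.datetime.utcnow() + datetime.timedelta(hours=ttl_hours)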
@@ -4,16 +4,15 @@ from os import environ
basedir = os.path.abspath(os.path.dirname(__file__))

NAME = "CR Connect Workflow"
DEVELOPMENT = True
TESTING = True
TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
PB_ENABLED = False

# This is here for when we are running the E2E tests in the frontend code bases,
# which will set the TESTING environment variable to true, causing this to execute, but we need
# to respect the environment variables in that case.
# When running locally the defaults apply, meaning we use crc_test for the tests
# and we don't overwrite the database. Did you read this far? Have a cookie!
PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
DB_HOST = environ.get('DB_HOST', default="localhost")
DB_PORT = environ.get('DB_PORT', default="5432")
DB_NAME = environ.get('DB_NAME', default="crc_test")

@@ -23,8 +22,8 @@ SQLALCHEMY_DATABASE_URI = environ.get(
    'SQLALCHEMY_DATABASE_URI',
    default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
)
ADMIN_UIDS = ['dhf8r']

print('### USING TESTING CONFIG: ###')
print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
print('DEVELOPMENT = ', DEVELOPMENT)
print('TESTING = ', TESTING)
@@ -1,17 +0,0 @@
import os
basedir = os.path.abspath(os.path.dirname(__file__))

NAME = "CR Connect Workflow"
DEVELOPMENT = True
TESTING = True
SQLALCHEMY_DATABASE_URI = "postgresql://postgres:@localhost:5432/crc_test"
TOKEN_AUTH_TTL_HOURS = 2
TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
FRONTEND_AUTH_CALLBACK = "http://localhost:4200/session"  # Not Required
PB_ENABLED = False

print('+++ USING TRAVIS TESTING CONFIG: +++')
print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
print('DEVELOPMENT = ', DEVELOPMENT)
print('TESTING = ', TESTING)
print('FRONTEND_AUTH_CALLBACK = ', FRONTEND_AUTH_CALLBACK)
@@ -57,15 +57,16 @@ env = Environment(loader=FileSystemLoader(template_dir))
mail = Mail(app)

print('=== USING THESE CONFIG SETTINGS: ===')
print('DB_HOST = ', )
print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
print('DEVELOPMENT = ', app.config['DEVELOPMENT'])
print('TESTING = ', app.config['TESTING'])
print('PRODUCTION = ', app.config['PRODUCTION'])
print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
print('LDAP_URL = ', app.config['LDAP_URL'])
print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
print('DB_HOST = ', app.config['DB_HOST'])
print('LDAP_URL = ', app.config['LDAP_URL'])
print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
print('PB_ENABLED = ', app.config['PB_ENABLED'])
print('PRODUCTION = ', app.config['PRODUCTION'])
print('TESTING = ', app.config['TESTING'])
print('TEST_UID = ', app.config['TEST_UID'])
print('ADMIN_UIDS = ', app.config['ADMIN_UIDS'])

@app.cli.command()
def load_example_data():
65 crc/api.yml

@@ -9,54 +9,18 @@ servers:
security:
  - jwt: ['secret']
paths:
  /sso_backdoor:
  /login:
    get:
      operationId: crc.api.user.backdoor
      summary: A backdoor that allows someone to log in as a specific user, if they
        are in a staging environment.
      operationId: crc.api.user.login
      summary: In production, logs the user in via SSO. If not in production, logs in as a specific user for testing.
      security: []  # Disable security for this endpoint only.
      parameters:
        - name: uid
          in: query
          required: true
          schema:
            type: string
        - name: email_address
          in: query
          required: false
          schema:
            type: string
        - name: display_name
          in: query
          required: false
          schema:
            type: string
        - name: affiliation
          in: query
          required: false
          schema:
            type: string
        - name: eppn
          in: query
          required: false
          schema:
            type: string
        - name: first_name
          in: query
          required: false
          schema:
            type: string
        - name: last_name
          in: query
          required: false
          schema:
            type: string
        - name: title
          in: query
          required: false
          schema:
            type: string
        - name: redirect
        - name: redirect_url
          in: query
          required: false
          schema:
@@ -150,6 +114,8 @@ paths:
          $ref: "#/components/schemas/Study"
    delete:
      operationId: crc.api.study.delete_study
      security:
        - auth_admin: ['secret']
      summary: Removes the given study completely.
      tags:
        - Studies

@@ -251,6 +217,8 @@ paths:
          $ref: "#/components/schemas/WorkflowSpec"
    put:
      operationId: crc.api.workflow.update_workflow_specification
      security:
        - auth_admin: ['secret']
      summary: Modifies an existing workflow specification with the given parameters.
      tags:
        - Workflow Specifications

@@ -268,6 +236,8 @@ paths:
          $ref: "#/components/schemas/WorkflowSpec"
    delete:
      operationId: crc.api.workflow.delete_workflow_specification
      security:
        - auth_admin: ['secret']
      summary: Removes an existing workflow specification
      tags:
        - Workflow Specifications

@@ -313,6 +283,8 @@ paths:
          $ref: "#/components/schemas/WorkflowSpecCategory"
    post:
      operationId: crc.api.workflow.add_workflow_spec_category
      security:
        - auth_admin: ['secret']
      summary: Creates a new workflow spec category with the given parameters.
      tags:
        - Workflow Specification Category

@@ -350,6 +322,8 @@ paths:
          $ref: "#/components/schemas/WorkflowSpecCategory"
    put:
      operationId: crc.api.workflow.update_workflow_spec_category
      security:
        - auth_admin: ['secret']
      summary: Modifies an existing workflow spec category with the given parameters.
      tags:
        - Workflow Specification Category

@@ -367,6 +341,8 @@ paths:
          $ref: "#/components/schemas/WorkflowSpecCategory"
    delete:
      operationId: crc.api.workflow.delete_workflow_spec_category
      security:
        - auth_admin: ['secret']
      summary: Removes an existing workflow spec category
      tags:
        - Workflow Specification Category

@@ -566,6 +542,8 @@ paths:
          example: '<?xml version="1.0" encoding="UTF-8"?><bpmn:definitions></bpmn:definitions>'
    put:
      operationId: crc.api.file.set_reference_file
      security:
        - auth_admin: ['secret']
      summary: Update the contents of a named reference file.
      tags:
        - Files

@@ -624,6 +602,8 @@ paths:
          $ref: "#/components/schemas/Workflow"
    delete:
      operationId: crc.api.workflow.delete_workflow
      security:
        - auth_admin: ['secret']
      summary: Removes an existing workflow
      tags:
        - Workflows and Tasks

@@ -944,6 +924,11 @@ components:
      scheme: bearer
      bearerFormat: JWT
      x-bearerInfoFunc: crc.api.user.verify_token
    auth_admin:
      type: http
      scheme: bearer
      bearerFormat: JWT
      x-bearerInfoFunc: crc.api.user.verify_token_admin
  schemas:
    User:
      properties:
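For each securitySchemes entry, connexion resolves x-bearerInfoFunc to a Python callable and hands it the raw bearer token. A hedged sketch of the contract such a function satisfies (the claim check below is illustrative, not the project's actual logic):

    def verify_token(token):
        # connexion passes the raw bearer token; return a claims dict
        # (injected downstream as token_info) to accept the request.
        # Returning None makes connexion answer 401 Unauthorized.
        if token == "expected-token":  # placeholder validation only
            return {"sub": "dhf8r"}
        return None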
@@ -24,7 +24,6 @@ def get_approval_counts(as_user=None):
        .all()

    study_ids = [a.study_id for a in db_user_approvals]
    print('study_ids', study_ids)

    db_other_approvals = db.session.query(ApprovalModel)\
        .filter(ApprovalModel.study_id.in_(study_ids))\

@@ -39,8 +38,8 @@ def get_approval_counts(as_user=None):
        other_approvals[approval.study_id] = approval

    counts = {}
    for status in ApprovalStatus:
        counts[status.name] = 0
    for name, value in ApprovalStatus.__members__.items():
        counts[name] = 0

    for approval in db_user_approvals:
        # Check if another approval has the same study id

@@ -57,6 +56,8 @@ def get_approval_counts(as_user=None):
            counts[ApprovalStatus.CANCELED.name] += 1
        elif other_approval.status == ApprovalStatus.APPROVED.name:
            counts[approval.status] += 1
        else:
            counts[approval.status] += 1
    else:
        counts[approval.status] += 1
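The switch to ApprovalStatus.__members__ makes the zero-initialisation independent of how the enum iterates: iterating an Enum class yields only canonical members, while __members__ also includes aliases, and it yields names directly. A standalone illustration with a stand-in enum (ApprovalStatus itself lives in crc.models.approval):

    import enum

    class Status(enum.Enum):  # stand-in for ApprovalStatus
        PENDING = "PENDING"
        APPROVED = "APPROVED"

    # Keys are status names, matching the string stored on ApprovalModel.status
    counts = {name: 0 for name in Status.__members__}
    assert counts == {"PENDING": 0, "APPROVED": 0}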
252 crc/api/user.py

@@ -10,29 +10,113 @@ from crc.services.ldap_service import LdapService, LdapModel
.. module:: crc.api.user
   :synopsis: Single Sign On (SSO) user login and session handlers
"""
def verify_token(token):
    failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate", status_code=403)
    if (not 'PRODUCTION' in app.config or not app.config['PRODUCTION']) and token == app.config["SWAGGER_AUTH_KEY"]:


def verify_token(token=None):
    """
    Verifies the token for the user (if provided). If in the production environment and no token is provided,
    gets the user from the SSO headers and returns their token.

    Args:
        token: Optional[str]

    Returns:
        token: str

    Raises:
        ApiError. If not on production and the token is not valid, returns an 'invalid_token' 403 error.
        If on production and the user is not authenticated, returns a 'no_user' 403 error.
    """

    failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate",
                             status_code=403)

    if not _is_production() and (token is None or 'user' not in g):
        g.user = UserModel.query.first()
        token = g.user.encode_auth_token()

    try:
        token_info = UserModel.decode_auth_token(token)
        g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
    except:
        raise failure_error
    if g.user is not None:
        return token_info
    if token:
        try:
            token_info = UserModel.decode_auth_token(token)
            g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
        except:
            raise failure_error
        if g.user is not None:
            return token_info
        else:
            raise failure_error

    # If there's no token and we're in production, get the user from the SSO headers and return their token
    if not token and _is_production():
        uid = _get_request_uid(request)

        if uid is not None:
            db_user = UserModel.query.filter_by(uid=uid).first()

            if db_user is not None:
                g.user = db_user
                token = g.user.encode_auth_token().decode()
                token_info = UserModel.decode_auth_token(token)
                return token_info

            else:
                raise ApiError("no_user", "User not found. Please login via the frontend app before accessing this feature.",
                               status_code=403)


def verify_token_admin(token=None):
    """
    Verifies the token for the user (if provided) in a non-production environment. If in the production environment,
    checks that the user is in the list of authorized admins.

    Args:
        token: Optional[str]

    Returns:
        token: str
    """

    # If this is production, check that the user is in the list of admins
    if _is_production():
        uid = _get_request_uid(request)

        if uid is not None and uid in app.config['ADMIN_UIDS']:
            return verify_token()

    # If we're not in production, just use the normal verify_token method
    else:
        raise failure_error
        return verify_token(token)


def get_current_user():
    return UserModelSchema().dump(g.user)

@app.route('/v1.0/login')
def sso_login():
    # This is what I see coming back:

def login(
        uid=None,
        redirect_url=None,
):
    """
    In a non-production environment, provides an endpoint for end-to-end system testing that allows the system
    to simulate logging in as a specific user. In the production environment, simply logs the user in via single-sign-on
    (SSO) Shibboleth authentication headers.

    Args:
        uid: Optional[str]
        redirect_url: Optional[str]

    Returns:
        str. If not on production, returns the frontend auth callback URL, with auth token appended.
        If on production and the user is authenticated via SSO, returns the frontend auth callback URL,
        with auth token appended.

    Raises:
        ApiError. If on production and the user is not authenticated, returns a 404 error.
    """

    # ----------------------------------------
    # Shibboleth Authentication Headers
    # ----------------------------------------
    # X-Remote-Cn: Daniel Harold Funk (dhf8r)
    # X-Remote-Sn: Funk
    # X-Remote-Givenname: Daniel
@@ -47,59 +131,52 @@ def sso_login():
    # X-Forwarded-Host: dev.crconnect.uvadcos.io
    # X-Forwarded-Server: dev.crconnect.uvadcos.io
    # Connection: Keep-Alive
    uid = request.headers.get("Uid")
    if not uid:
        uid = request.headers.get("X-Remote-Uid")

    if not uid:
        raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
                       % str(request.headers))

    redirect = request.args.get('redirect')
    app.logger.info("SSO_LOGIN: Full URL: " + request.url)
    app.logger.info("SSO_LOGIN: User Id: " + uid)
    app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect))
    info = LdapService.user_info(uid)
    return _handle_login(info, redirect)
    # If we're in production, override any uid with the uid from the SSO request headers
    if _is_production():
        uid = _get_request_uid(request)

    if uid:
        app.logger.info("SSO_LOGIN: Full URL: " + request.url)
        app.logger.info("SSO_LOGIN: User Id: " + uid)
        app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect_url))

        ldap_info = LdapService().user_info(uid)

        if ldap_info:
            return _handle_login(ldap_info, redirect_url)

    raise ApiError('404', 'unknown')


@app.route('/sso')
def sso():
    response = ""
    response += "<h1>Headers</h1>"
    response += "<ul>"
    for k,v in request.headers:
    for k, v in request.headers:
        response += "<li><b>%s</b> %s</li>\n" % (k, v)
    response += "<h1>Environment</h1>"
    for k,v in request.environ:
    for k, v in request.environ:
        response += "<li><b>%s</b> %s</li>\n" % (k, v)
    return response


def _handle_login(user_info: LdapModel, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
    """On successful login, adds user to database if the user is not already in the system,
    then returns the frontend auth callback URL, with auth token appended.
def _handle_login(user_info: LdapModel, redirect_url=None):
    """
    On successful login, adds the user to the database if the user is not already in the system,
    then returns the frontend auth callback URL, with auth token appended.

    Args:
        user_info - an ldap user_info object.
        redirect_url: Optional[str]

    Returns:
        Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
    """
    user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()

    if user is None:
        # Add new user
        user = UserModel()

    user.uid = user_info.uid
    user.display_name = user_info.display_name
    user.email_address = user_info.email_address
    user.affiliation = user_info.affiliation
    user.title = user_info.title

    db.session.add(user)
    db.session.commit()
    user = _upsert_user(user_info)
    g.user = user

    # Return the frontend auth callback URL, with auth token appended.
    auth_token = user.encode_auth_token().decode()
@@ -114,41 +191,44 @@ def _handle_login(user_info: LdapModel, redirect_url=app.config['FRONTEND_AUTH_C
    return auth_token


def _upsert_user(user_info):
    user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()

def backdoor(
        uid=None,
        affiliation=None,
        display_name=None,
        email_address=None,
        eppn=None,
        first_name=None,
        last_name=None,
        title=None,
        redirect=None,
):
    """A backdoor for end-to-end system testing that allows the system to simulate logging in as a specific user.
    Only works if the application is running in a non-production environment.

    Args:
        uid: str
        affiliation: Optional[str]
        display_name: Optional[str]
        email_address: Optional[str]
        eppn: Optional[str]
        first_name: Optional[str]
        last_name: Optional[str]
        title: Optional[str]
        redirect_url: Optional[str]

    Returns:
        str. If not on production, returns the frontend auth callback URL, with auth token appended.

    Raises:
        ApiError. If on production, returns a 404 error.
    """
    if not 'PRODUCTION' in app.config or not app.config['PRODUCTION']:

        ldap_info = LdapService.user_info(uid)
        return _handle_login(ldap_info, redirect)
    if user is None:
        # Add new user
        user = UserModel()
    else:
        raise ApiError('404', 'unknown')
        user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).with_for_update().first()

    user.uid = user_info.uid
    user.display_name = user_info.display_name
    user.email_address = user_info.email_address
    user.affiliation = user_info.affiliation
    user.title = user_info.title

    db.session.add(user)
    db.session.commit()
    return user


def _get_request_uid(req):
    uid = None

    if _is_production():

        if 'user' in g and g.user is not None:
            return g.user.uid

        uid = req.headers.get("Uid")
        if not uid:
            uid = req.headers.get("X-Remote-Uid")

        if not uid:
            raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
                           % str(req.headers))

    return uid


def _is_production():
    return 'PRODUCTION' in app.config and app.config['PRODUCTION']
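Net effect of the user.py rewrite: the old /sso_backdoor endpoint is gone and /login now handles both production SSO and test logins. A hedged sketch of the test-side round trip against the merged endpoint (the `client` fixture is illustrative; base_test.py below does the equivalent):

    # Outside production, /login issues a token for the given uid and
    # 302-redirects to redirect_url with the auth token appended.
    rv = client.get('/v1.0/login?uid=dhf8r&redirect_url=http://localhost:4200/session')
    assert rv.status_code == 302
    assert rv.location.startswith('http://localhost:4200/session')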
@@ -1,6 +1,8 @@
import uuid

from crc import session
from flask import g

from crc import session, app
from crc.api.common import ApiError, ApiErrorSchema
from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
from crc.models.file import FileModel, LookupDataSchema

@@ -129,7 +131,7 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
        workflow_spec_id=processor.workflow_spec_id,
        spec_version=processor.get_version_string(),
        is_latest_spec=processor.is_latest_spec,
        total_tasks=processor.workflow_model.total_tasks,
        total_tasks=len(navigation),
        completed_tasks=processor.workflow_model.completed_tasks,
        last_updated=processor.workflow_model.last_updated,
        title=spec.display_name

@@ -156,6 +158,7 @@ def delete_workflow(workflow_id):

def set_current_task(workflow_id, task_id):
    workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
    user_uid = __get_user_uid(workflow_model.study.user_uid)
    processor = WorkflowProcessor(workflow_model)
    task_id = uuid.UUID(task_id)
    task = processor.bpmn_workflow.get_task(task_id)

@@ -167,13 +170,21 @@ def set_current_task(workflow_id, task_id):
    if task.state == task.COMPLETED:
        task.reset_token(reset_data=False)  # we could optionally clear the previous data.
    processor.save()
    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
    workflow_api_model = __get_workflow_api_model(processor, task)
    return WorkflowApiSchema().dump(workflow_api_model)


def update_task(workflow_id, task_id, body):
    workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()

    if workflow_model is None:
        raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)

    elif workflow_model.study is None:
        raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)

    user_uid = __get_user_uid(workflow_model.study.user_uid)
    processor = WorkflowProcessor(workflow_model)
    task_id = uuid.UUID(task_id)
    task = processor.bpmn_workflow.get_task(task_id)

@@ -184,7 +195,7 @@ def update_task(workflow_id, task_id, body):
    processor.complete_task(task)
    processor.do_engine_steps()
    processor.save()
    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE)
    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)

    workflow_api_model = __get_workflow_api_model(processor)
    return WorkflowApiSchema().dump(workflow_api_model)

@@ -239,3 +250,14 @@ def lookup(workflow_id, field_id, query, limit):
    workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
    lookup_data = LookupService.lookup(workflow, field_id, query, limit)
    return LookupDataSchema(many=True).dump(lookup_data)


def __get_user_uid(user_uid):
    if 'user' in g:
        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
        else:
            return g.user.uid

    else:
        raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
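__get_user_uid boils down to one predicate: admins may act on any workflow, everyone else only on their own. A condensed standalone restatement (names illustrative):

    def may_edit(current_uid, owner_uid, admin_uids):
        # Admins may edit any workflow; other users only their own.
        return current_uid in admin_uids or current_uid == owner_uid

    assert may_edit('dhf8r', 'lb3dp', admin_uids=['dhf8r'])      # admin, someone else's workflow
    assert not may_edit('lb3dp', 'dhf8r', admin_uids=['dhf8r'])  # non-admin, not the owner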
@@ -16,7 +16,8 @@ class MultiInstanceType(enum.Enum):


class NavigationItem(object):
    def __init__(self, id, task_id, name, title, backtracks, level, indent, child_count, state, is_decision, task=None):
    def __init__(self, id, task_id, name, title, backtracks, level, indent, child_count, state, is_decision,
                 task=None, lane=None):
        self.id = id
        self.task_id = task_id
        self.name = name,
@@ -19,7 +19,7 @@ class UserModel(db.Model):
    last_name = db.Column(db.String, nullable=True)
    title = db.Column(db.String, nullable=True)

    # Add Department and School
    # TODO: Add Department and School


    def encode_auth_token(self):

@@ -27,7 +27,7 @@ class UserModel(db.Model):
        Generates the Auth Token
        :return: string
        """
        hours = int(app.config['TOKEN_AUTH_TTL_HOURS'])
        hours = float(app.config['TOKEN_AUTH_TTL_HOURS'])
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=hours, minutes=0, seconds=0),
            'iat': datetime.datetime.utcnow(),

@@ -36,7 +36,7 @@ class UserModel(db.Model):
        return jwt.encode(
            payload,
            app.config.get('TOKEN_AUTH_SECRET_KEY'),
            algorithm='HS256'
            algorithm='HS256',
        )

    @staticmethod

@@ -50,9 +50,9 @@ class UserModel(db.Model):
            payload = jwt.decode(auth_token, app.config.get('TOKEN_AUTH_SECRET_KEY'), algorithms='HS256')
            return payload
        except jwt.ExpiredSignatureError:
            raise ApiError('token_expired', 'The Authentication token you provided expired, and must be renewed.')
            raise ApiError('token_expired', 'The Authentication token you provided expired and must be renewed.')
        except jwt.InvalidTokenError:
            raise ApiError('token_invalid', 'The Authentication token you provided. You need a new token. ')
            raise ApiError('token_invalid', 'The Authentication token you provided is invalid. You need a new token. ')


class UserModelSchema(SQLAlchemyAutoSchema):
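The encode/decode pair is plain PyJWT with HS256 and an 'exp'/'iat'/'sub' claims layout; a self-contained sketch with a throwaway key, showing the fractional-hour TTL the int-to-float change enables:

    import datetime
    import jwt  # PyJWT

    SECRET = "throwaway-demo-key"  # stand-in for TOKEN_AUTH_SECRET_KEY
    payload = {
        'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=0.5),  # 30-minute token
        'iat': datetime.datetime.utcnow(),
        'sub': 'dhf8r',
    }
    token = jwt.encode(payload, SECRET, algorithm='HS256')
    claims = jwt.decode(token, SECRET, algorithms=['HS256'])
    assert claims['sub'] == 'dhf8r'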
@@ -24,7 +24,7 @@ class LdapService(object):
    @staticmethod
    def __get_conn():
        if not LdapService.conn:
            if app.config['TESTING']:
            if app.config['TESTING'] or app.config['LDAP_URL'] == 'mock':
                server = Server('my_fake_server')
                conn = Connection(server, client_strategy=MOCK_SYNC)
                file_path = os.path.abspath(os.path.join(app.root_path, '..', 'tests', 'data', 'ldap_response.json'))
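The 'mock' LDAP_URL path relies on ldap3's MOCK_SYNC client strategy, which can be preloaded from a canned JSON file; a hedged sketch of that wiring (the path mirrors the tests/data file the service loads; entries_from_json is ldap3's documented mock-strategy loader):

    from ldap3 import Server, Connection, MOCK_SYNC

    server = Server('my_fake_server')
    conn = Connection(server, client_strategy=MOCK_SYNC)
    conn.strategy.entries_from_json('tests/data/ldap_response.json')  # preload fake directory entries
    conn.bind()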
@@ -1,4 +1,5 @@
import json
from json import JSONDecodeError
from typing import List, Optional

import requests

@@ -26,10 +27,17 @@ class ProtocolBuilderService(object):
        ProtocolBuilderService.__enabled_or_raise()
        if not isinstance(user_id, str):
            raise ApiError("protocol_builder_error", "This user id is invalid: " + str(user_id))
        response = requests.get(ProtocolBuilderService.STUDY_URL % user_id)
        url = ProtocolBuilderService.STUDY_URL % user_id
        response = requests.get(url)
        if response.ok and response.text:
            pb_studies = ProtocolBuilderStudySchema(many=True).loads(response.text)
            return pb_studies
            try:
                pb_studies = ProtocolBuilderStudySchema(many=True).loads(response.text)
                return pb_studies
            except JSONDecodeError as err:
                raise ApiError("protocol_builder_error",
                               "Received an invalid response from the protocol builder. The response is not "
                               "valid json. Url: %s, Response: %s, error: %s" %
                               (url, response.text, err.msg))
        else:
            raise ApiError("protocol_builder_error",
                           "Received an invalid response from the protocol builder (status %s): %s" %
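Catching JSONDecodeError here covers the common failure where an upstream service returns a 200 with an HTML error page instead of JSON. The same guard in isolation (error type swapped for a generic one to keep the sketch standalone):

    from json import JSONDecodeError, loads

    def parse_json_or_explain(text, url):
        try:
            return loads(text)
        except JSONDecodeError as err:
            # Surface the offending URL and parser message instead of a bare traceback
            raise ValueError("Invalid JSON from %s: %s" % (url, err.msg))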
@@ -86,8 +86,8 @@ class StudyService(object):
    def delete_workflow(workflow):
        for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
            FileService.delete_file(file.id)
        for deb in workflow.dependencies:
            session.delete(deb)
        for dep in workflow.dependencies:
            session.delete(dep)
        session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
        session.query(WorkflowModel).filter_by(id=workflow.id).delete()

@@ -133,7 +133,7 @@ class StudyService(object):
        that is available.."""

        # Get PB required docs, if Protocol Builder Service is enabled.
        if ProtocolBuilderService.is_enabled():
        if ProtocolBuilderService.is_enabled() and study_id is not None:
            try:
                pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
            except requests.exceptions.ConnectionError as ce:
@@ -58,7 +58,7 @@ class WorkflowService(object):

    @staticmethod
    def delete_test_data():
        for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
        for study in db.session.query(StudyModel).filter(StudyModel.user_uid == "test"):
            StudyService.delete_study(study.id)
        db.session.commit()

@@ -317,12 +317,12 @@ class WorkflowService(object):
            field.options.append({"id": d.value, "name": d.label})

    @staticmethod
    def log_task_action(processor, spiff_task, action):
    def log_task_action(user_uid, processor, spiff_task, action):
        task = WorkflowService.spiff_task_to_api_task(spiff_task)
        workflow_model = processor.workflow_model
        task_event = TaskEventModel(
            study_id=workflow_model.study_id,
            user_uid=g.user.uid,
            user_uid=user_uid,
            workflow_id=workflow_model.id,
            workflow_spec_id=workflow_model.workflow_spec_id,
            spec_version=processor.get_version_string(),
49 deploy.sh

@@ -1,44 +1,45 @@
#!/bin/bash

#########################################################################
# Builds the Docker image for the current git branch on Travis CI and
# publishes it to Docker Hub.
#
# Parameters:
#   $1: Docker Hub repository to publish to
#
# Required environment variables (place in Settings menu on Travis CI):
#   $DOCKER_USERNAME: Docker Hub username
#   $DOCKER_TOKEN: Docker Hub access token
#########################################################################

echo 'Building Docker image...'
DOCKER_REPO="$1"

function branch_to_tag () {
  if [ "$1" == "latest" ]; then echo "production"; else echo "$1" ; fi
  if [ "$1" == "master" ]; then echo "latest"; else echo "$1" ; fi
}

function branch_to_deploy_group() {
  if [[ $1 =~ ^(rrt\/.*)$ ]]; then echo "rrt"; else echo "crconnect" ; fi
}

function branch_to_deploy_stage () {
  if [ "$1" == "master" ]; then echo "production"; else echo "$1" ; fi
}
DOCKER_TAG=$(branch_to_tag "$TRAVIS_BRANCH")

REPO="sartography/cr-connect-workflow"
TAG=$(branch_to_tag "$TRAVIS_BRANCH")

DEPLOY_APP="backend"
DEPLOY_GROUP=$(branch_to_deploy_group "$TRAVIS_BRANCH")
DEPLOY_STAGE=$(branch_to_deploy_stage "$TRAVIS_BRANCH")

if [ "$DEPLOY_GROUP" == "rrt" ]; then
  IFS='/' read -ra ARR <<< "$TRAVIS_BRANCH" # Split branch on '/' character
  TAG=$(branch_to_tag "rrt_${ARR[1]}")
  DEPLOY_STAGE=$(branch_to_deploy_stage "${ARR[1]}")
  DOCKER_TAG=$(branch_to_tag "rrt_${ARR[1]}")
fi

DEPLOY_PATH="$DEPLOY_GROUP/$DEPLOY_STAGE/$DEPLOY_APP"
echo "REPO = $REPO"
echo "TAG = $TAG"
echo "DEPLOY_PATH = $DEPLOY_PATH"
echo "DOCKER_REPO = $DOCKER_REPO"
echo "DOCKER_TAG = $DOCKER_TAG"

# Build and push Docker image to Docker Hub
echo "$DOCKER_TOKEN" | docker login -u "$DOCKER_USERNAME" --password-stdin || exit 1
docker build -f Dockerfile -t "$REPO:$TAG" . || exit 1
docker push "$REPO" || exit 1
docker build -f Dockerfile -t "$DOCKER_REPO:$DOCKER_TAG" . || exit 1

# Wait for Docker Hub

# Push Docker image to Docker Hub
echo "Publishing to Docker Hub..."
sleep 30

# Notify UVA DCOS that Docker image has been updated
echo "Refreshing DC/OS..."
aws sqs send-message --region "$AWS_DEFAULT_REGION" --queue-url "$AWS_SQS_URL" --message-body "$DEPLOY_PATH" || exit 1
docker push "$DOCKER_REPO" || exit 1
echo "Done."
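The branch-to-tag and deploy-path rules in deploy.sh are small enough to restate; a Python transcription for reference (behavioral sketch only, not used by CI):

    def branch_to_tag(branch):
        # master publishes as the Docker Hub "latest" tag
        return "latest" if branch == "master" else branch

    def deploy_path(branch):
        group = "rrt" if branch.startswith("rrt/") else "crconnect"
        stage = branch.split("/", 1)[1] if group == "rrt" else branch
        stage = "production" if stage == "master" else stage
        return "%s/%s/backend" % (group, stage)

    assert branch_to_tag("master") == "latest"
    assert deploy_path("rrt/dev") == "rrt/dev/backend"
    assert deploy_path("master") == "crconnect/production/backend"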
@@ -1,53 +1,39 @@
import json
from tests.base_test import BaseTest
import random
import string

from flask import g

from tests.base_test import BaseTest
from crc import session, db
from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.approval import ApprovalModel, ApprovalStatus
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowModel


class TestApprovals(BaseTest):
    def setUp(self):
        """Initial setup shared by all TestApprovals tests"""
        self.load_example_data()
        self.study = self.create_study()
        self.workflow = self.create_workflow('random_fact')
        self.unrelated_study = StudyModel(title="second study",
                                          protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
                                          user_uid="dhf8r", primary_investigator_id="dhf8r")
        self.unrelated_workflow = self.create_workflow('random_fact', study=self.unrelated_study)

        # TODO: Move to base_test as a helper
        self.approval = ApprovalModel(
            study=self.study,
            workflow=self.workflow,
            approver_uid='lb3dp',
            status=ApprovalStatus.PENDING.value,
            version=1
        # Add a study with 2 approvers
        study_workflow_approvals_1 = self._create_study_workflow_approvals(
            user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp",
            approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value]
        )
        session.add(self.approval)
        self.study = study_workflow_approvals_1['study']
        self.workflow = study_workflow_approvals_1['workflow']
        self.approval = study_workflow_approvals_1['approvals'][0]
        self.approval_2 = study_workflow_approvals_1['approvals'][1]

        self.approval_2 = ApprovalModel(
            study=self.study,
            workflow=self.workflow,
            approver_uid='dhf8r',
            status=ApprovalStatus.PENDING.value,
            version=1
        # Add a study with 1 approver
        study_workflow_approvals_2 = self._create_study_workflow_approvals(
            user_uid="dhf8r", title="second study", primary_investigator_id="dhf8r",
            approver_uids=["lb3dp"], statuses=[ApprovalStatus.PENDING.value]
        )
        session.add(self.approval_2)

        # A third study, unrelated to the first.
        self.approval_3 = ApprovalModel(
            study=self.unrelated_study,
            workflow=self.unrelated_workflow,
            approver_uid='lb3dp',
            status=ApprovalStatus.PENDING.value,
            version=1
        )
        session.add(self.approval_3)

        session.commit()
        self.unrelated_study = study_workflow_approvals_2['study']
        self.unrelated_workflow = study_workflow_approvals_2['workflow']
        self.approval_3 = study_workflow_approvals_2['approvals'][0]

    def test_list_approvals_per_approver(self):
        """Only approvals associated with approver should be returned"""

@@ -85,7 +71,7 @@ class TestApprovals(BaseTest):
        response = json.loads(rv.get_data(as_text=True))
        response_count = len(response)
        self.assertEqual(1, response_count)
        self.assertEqual(1, len(response[0]['related_approvals']))  # this approval has a related approval.

    def test_update_approval_fails_if_not_the_approver(self):
        approval = session.query(ApprovalModel).filter_by(approver_uid='lb3dp').first()

@@ -145,9 +131,130 @@ class TestApprovals(BaseTest):
        self.assertEqual(approval.status, ApprovalStatus.DECLINED.value)

    def test_csv_export(self):
        approvals = db.session.query(ApprovalModel).all()
        for app in approvals:
            app.status = ApprovalStatus.APPROVED.value
        db.session.commit()
        self.load_test_spec('two_forms')
        self._add_lots_of_random_approvals(n=50, workflow_spec_name='two_forms')

        # Get all workflows
        workflows = db.session.query(WorkflowModel).filter_by(workflow_spec_id='two_forms').all()

        # For each workflow, complete all tasks
        for workflow in workflows:
            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
            self.assertEqual('two_forms', workflow_api.workflow_spec_id)

            # Log current user out.
            g.user = None
            self.assertIsNone(g.user)

            # Complete the form for Step one and post it.
            self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}, error_code=None, user_uid=workflow.study.user_uid)

            # Get the next Task
            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
            self.assertEqual("StepTwo", workflow_api.next_task.name)

            # Get all user Tasks and check that the data have been saved
            task = workflow_api.next_task
            self.assertIsNotNone(task.data)
            for val in task.data.values():
                self.assertIsNotNone(val)

        rv = self.app.get(f'/v1.0/approval/csv', headers=self.logged_in_headers())
        self.assert_success(rv)

    def test_all_approvals(self):
        self._add_lots_of_random_approvals()

        not_canceled = session.query(ApprovalModel).filter(ApprovalModel.status != 'CANCELED').all()
        not_canceled_study_ids = []
        for a in not_canceled:
            if a.study_id not in not_canceled_study_ids:
                not_canceled_study_ids.append(a.study_id)

        rv_all = self.app.get(f'/v1.0/all_approvals?status=false', headers=self.logged_in_headers())
        self.assert_success(rv_all)
        all_data = json.loads(rv_all.get_data(as_text=True))
        self.assertEqual(len(all_data), len(not_canceled_study_ids), 'Should return all non-canceled approvals, grouped by study')

        all_approvals = session.query(ApprovalModel).all()
        all_approvals_study_ids = []
        for a in all_approvals:
            if a.study_id not in all_approvals_study_ids:
                all_approvals_study_ids.append(a.study_id)

        rv_all = self.app.get(f'/v1.0/all_approvals?status=true', headers=self.logged_in_headers())
        self.assert_success(rv_all)
        all_data = json.loads(rv_all.get_data(as_text=True))
        self.assertEqual(len(all_data), len(all_approvals_study_ids), 'Should return all approvals, grouped by study')

    def test_approvals_counts(self):
        statuses = [name for name, value in ApprovalStatus.__members__.items()]
        self._add_lots_of_random_approvals()

        # Get the counts
        rv_counts = self.app.get(f'/v1.0/approval-counts', headers=self.logged_in_headers())
        self.assert_success(rv_counts)
        counts = json.loads(rv_counts.get_data(as_text=True))

        # Get the actual approvals
        rv_approvals = self.app.get(f'/v1.0/approval', headers=self.logged_in_headers())
        self.assert_success(rv_approvals)
        approvals = json.loads(rv_approvals.get_data(as_text=True))

        # Tally up the number of approvals in each status category
        manual_counts = {}
        for status in statuses:
            manual_counts[status] = 0

        for approval in approvals:
            manual_counts[approval['status']] += 1

        # Numbers in each category should match
        for status in statuses:
            self.assertEqual(counts[status], manual_counts[status], 'Approval counts for status %s should match' % status)

        # Total number of approvals should match
        total_counts = sum(counts[status] for status in statuses)
        self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user')

    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
                                         workflow_spec_name="random_fact"):
        study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
        workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
        approvals = []

        for i in range(len(approver_uids)):
            approvals.append(self.create_approval(
                study=study,
                workflow=workflow,
                approver_uid=approver_uids[i],
                status=statuses[i],
                version=1
            ))

        return {
            'study': study,
            'workflow': workflow,
            'approvals': approvals,
        }

    def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"):
        num_studies_before = db.session.query(StudyModel).count()
        statuses = [name for name, value in ApprovalStatus.__members__.items()]

        # Add a whole bunch of approvals with random statuses
        for i in range(n):
            approver_uids = random.choices(["lb3dp", "dhf8r"])
            self._create_study_workflow_approvals(
                user_uid=random.choice(["lb3dp", "dhf8r"]),
                title="".join(random.choices(string.ascii_lowercase, k=64)),
                primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
                approver_uids=approver_uids,
                statuses=random.choices(statuses, k=len(approver_uids)),
                workflow_spec_name=workflow_spec_name
            )

        session.flush()
        num_studies_after = db.session.query(StudyModel).count()
        self.assertEqual(num_studies_after, num_studies_before + n)
@ -2,24 +2,27 @@
|
||||
# IMPORTANT - Environment must be loaded before app, models, etc....
|
||||
import os
|
||||
|
||||
from sqlalchemy import Sequence
|
||||
|
||||
os.environ["TESTING"] = "true"
|
||||
|
||||
import json
|
||||
import unittest
|
||||
import urllib.parse
|
||||
import datetime
|
||||
|
||||
from crc.models.protocol_builder import ProtocolBuilderStatus
|
||||
from crc.models.study import StudyModel
|
||||
from crc.services.file_service import FileService
|
||||
from crc.services.study_service import StudyService
|
||||
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
|
||||
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
|
||||
from crc.models.user import UserModel
|
||||
from flask import g
|
||||
from sqlalchemy import Sequence
|
||||
|
||||
from crc import app, db, session
|
||||
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
|
||||
from crc.models.approval import ApprovalModel, ApprovalStatus
|
||||
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
|
||||
from crc.models.protocol_builder import ProtocolBuilderStatus
|
||||
from crc.models.stats import TaskEventModel
|
||||
from crc.models.study import StudyModel
|
||||
from crc.models.user import UserModel
|
||||
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
|
||||
from crc.services.file_service import FileService
|
||||
from crc.services.study_service import StudyService
|
||||
from crc.services.workflow_service import WorkflowService
|
||||
from example_data import ExampleDataLoader
|
||||
|
||||
#UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
|
||||
@ -95,7 +98,7 @@ class BaseTest(unittest.TestCase):
|
||||
|
||||
def tearDown(self):
|
||||
ExampleDataLoader.clean_db()
|
||||
session.flush()
|
||||
g.user = None
|
||||
self.auths = {}
|
||||
|
||||
def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
|
||||
@ -107,12 +110,16 @@ class BaseTest(unittest.TestCase):
|
||||
user_info = {'uid': user.uid}
|
||||
|
||||
query_string = self.user_info_to_query_string(user_info, redirect_url)
|
||||
rv = self.app.get("/v1.0/sso_backdoor%s" % query_string, follow_redirects=False)
|
||||
rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)
|
||||
self.assertTrue(rv.status_code == 302)
|
||||
self.assertTrue(str.startswith(rv.location, redirect_url))
|
||||
|
||||
user_model = session.query(UserModel).filter_by(uid=uid).first()
|
||||
self.assertIsNotNone(user_model.display_name)
|
||||
self.assertEqual(user_model.uid, uid)
|
||||
self.assertTrue('user' in g, 'User should be in Flask globals')
|
||||
self.assertEqual(uid, g.user.uid, 'Logged in user should match given user uid')
|
||||
|
||||
return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
|
||||
|
||||
def load_example_data(self, use_crc_data=False, use_rrt_data=False):
|
||||
@ -159,6 +166,7 @@ class BaseTest(unittest.TestCase):
|
||||
@staticmethod
|
||||
def load_test_spec(dir_name, master_spec=False, category_id=None):
|
||||
"""Loads a spec into the database based on a directory in /tests/data"""
|
||||
|
||||
if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
|
||||
return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
|
||||
filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
|
||||
@ -198,7 +206,7 @@ class BaseTest(unittest.TestCase):
|
||||
for key, value in items:
|
||||
query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))
|
||||
|
||||
query_string_list.append('redirect=%s' % redirect_url)
|
||||
query_string_list.append('redirect_url=%s' % redirect_url)
|
||||
|
||||
return '?%s' % '&'.join(query_string_list)
|
||||
|
||||
@ -222,12 +230,12 @@ class BaseTest(unittest.TestCase):
|
||||
db.session.commit()
|
||||
return user
|
||||
|
||||
def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer"):
|
||||
study = session.query(StudyModel).first()
|
||||
def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"):
|
||||
study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
|
||||
if study is None:
|
||||
user = self.create_user(uid=uid)
|
||||
study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
|
||||
user_uid=user.uid, primary_investigator_id='lb3dp')
|
||||
user_uid=user.uid, primary_investigator_id=primary_investigator_id)
|
||||
db.session.add(study)
|
||||
db.session.commit()
|
||||
return study
|
||||
@ -249,3 +257,97 @@ class BaseTest(unittest.TestCase):
|
||||
binary_data=file.read(),
|
||||
content_type=CONTENT_TYPES['xls'])
|
||||
file.close()
|
||||
|
||||
def create_approval(
|
||||
self,
|
||||
study=None,
|
||||
workflow=None,
|
||||
approver_uid=None,
|
||||
status=None,
|
||||
version=None,
|
||||
):
|
||||
study = study or self.create_study()
|
||||
workflow = workflow or self.create_workflow()
|
||||
approver_uid = approver_uid or self.test_uid
|
||||
status = status or ApprovalStatus.PENDING.value
|
||||
version = version or 1
|
||||
approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version)
|
||||
db.session.add(approval)
|
||||
db.session.commit()
|
||||
return approval
|
||||
|
||||
def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"):
|
||||
user = session.query(UserModel).filter_by(uid=user_uid).first()
|
||||
self.assertIsNotNone(user)
|
||||
|
||||
rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
|
||||
(workflow.id, str(soft_reset), str(hard_reset)),
|
||||
headers=self.logged_in_headers(user),
|
||||
content_type="application/json")
|
||||
self.assert_success(rv)
|
||||
json_data = json.loads(rv.get_data(as_text=True))
|
||||
workflow_api = WorkflowApiSchema().load(json_data)
|
||||
self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
|
||||
return workflow_api
|
||||
|
||||
def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"):
|
||||
prev_completed_task_count = workflow_in.completed_tasks
        if isinstance(task_in, dict):
            task_id = task_in["id"]
        else:
            task_id = task_in.id

        user = session.query(UserModel).filter_by(uid=user_uid).first()
        self.assertIsNotNone(user)

        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
                          headers=self.logged_in_headers(user=user),
                          content_type="application/json",
                          data=json.dumps(dict_data))
        if error_code:
            self.assert_failure(rv, error_code=error_code)
            return

        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))

        # Assure stats are updated on the model
        workflow = WorkflowApiSchema().load(json_data)
        # The total number of tasks may change over time: as users move through gateways,
        # branches may be pruned, and new tasks may be created as we hit parallel multi-instance tasks...
        self.assertIsNotNone(workflow.total_tasks)
        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)

        # Assure a record exists in the Task Events
        task_events = session.query(TaskEventModel) \
            .filter_by(workflow_id=workflow.id) \
            .filter_by(task_id=task_id) \
            .order_by(TaskEventModel.date.desc()).all()
        self.assertGreater(len(task_events), 0)
        event = task_events[0]
        self.assertIsNotNone(event.study_id)
        self.assertEqual(user_uid, event.user_uid)
        self.assertEqual(workflow.id, event.workflow_id)
        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
        self.assertEqual(workflow.spec_version, event.spec_version)
        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
        self.assertEqual(task_in.id, task_id)
        self.assertEqual(task_in.name, event.task_name)
        self.assertEqual(task_in.title, event.task_title)
        self.assertEqual(task_in.type, event.task_type)
        self.assertEqual("COMPLETED", event.task_state)

        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
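        # (Most likely the schema sometimes hands back the raw string value rather than the enum member, so both shapes are tolerated below.)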
        if isinstance(task_in.multi_instance_type, MultiInstanceType):
            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
        else:
            self.assertEqual(task_in.multi_instance_type, event.mi_type)

        self.assertEqual(task_in.multi_instance_count, event.mi_count)
        self.assertEqual(task_in.multi_instance_index, event.mi_index)
        self.assertEqual(task_in.process_name, event.process_name)
        self.assertIsNotNone(event.date)

        workflow = WorkflowApiSchema().load(json_data)
        return workflow
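
For orientation, a test can now lean on these shared BaseTest helpers instead of rolling its own HTTP plumbing. A minimal sketch, assuming a 'two_forms' workflow spec and a 'color' form field exist in the test data (both names are illustrative, not part of this commit):

    def test_example_round_trip(self):
        self.load_example_data()
        workflow = self.create_workflow('two_forms')    # hypothetical spec name
        workflow_api = self.get_workflow_api(workflow)  # fetches current state as the default user, dhf8r
        task = workflow_api.next_task
        # hypothetical form data; complete_form re-loads and re-validates the workflow state
        workflow_api = self.complete_form(workflow_api, task, {'color': 'blue'})
        self.assertIsNotNone(workflow_api.next_task)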
@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.0">
<bpmn:process id="Process_18biih5" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
@ -21,10 +21,10 @@
<bpmn:sequenceFlow id="SequenceFlow_1lmkn99" sourceRef="Task_Has_Bananas" targetRef="ExclusiveGateway_003amsm" />
<bpmn:exclusiveGateway id="ExclusiveGateway_003amsm" name="Has Bananas?">
<bpmn:incoming>SequenceFlow_1lmkn99</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_Yes_Bananas</bpmn:outgoing>
<bpmn:outgoing>SequenceFlow_A_Yes_Bananas</bpmn:outgoing>
<bpmn:outgoing>SequenceFlow_No_Bananas</bpmn:outgoing>
</bpmn:exclusiveGateway>
<bpmn:sequenceFlow id="SequenceFlow_Yes_Bananas" name="yes " sourceRef="ExclusiveGateway_003amsm" targetRef="Task_Num_Bananas">
<bpmn:sequenceFlow id="SequenceFlow_A_Yes_Bananas" name="yes " sourceRef="ExclusiveGateway_003amsm" targetRef="Task_Num_Bananas">
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">has_bananas == True</bpmn:conditionExpression>
</bpmn:sequenceFlow>
<bpmn:sequenceFlow id="SequenceFlow_No_Bananas" name="no" sourceRef="ExclusiveGateway_003amsm" targetRef="Task_Why_No_Bananas">
@ -36,7 +36,7 @@
<camunda:formField id="num_bananas" label="How Many Bananas do you have?" type="long" defaultValue="1" />
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>SequenceFlow_Yes_Bananas</bpmn:incoming>
<bpmn:incoming>SequenceFlow_A_Yes_Bananas</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_02z84p5</bpmn:outgoing>
</bpmn:userTask>
<bpmn:userTask id="Task_Why_No_Bananas" name="Why no bananas" camunda:formKey="no_bananas">
@ -75,7 +75,7 @@
<dc:Bounds x="459" y="183" width="13" height="14" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_0f3vx1l_di" bpmnElement="SequenceFlow_Yes_Bananas">
<bpmndi:BPMNEdge id="SequenceFlow_0f3vx1l_di" bpmnElement="SequenceFlow_A_Yes_Bananas">
<di:waypoint x="475" y="117" />
<di:waypoint x="560" y="117" />
<bpmndi:BPMNLabel>

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_83c9f25" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_83c9f25" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.0">
<bpmn:process id="Process_84bead4" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_1ux3ndu</bpmn:outgoing>
@ -8,13 +8,13 @@
<bpmn:exclusiveGateway id="Gateway_1lh8c45" name="Decide Which Branch?">
<bpmn:incoming>Flow_1ut95vk</bpmn:incoming>
<bpmn:outgoing>Flow_1fok0lz</bpmn:outgoing>
<bpmn:outgoing>Flow_01he29w</bpmn:outgoing>
<bpmn:outgoing>Flow_21he29w</bpmn:outgoing>
</bpmn:exclusiveGateway>
<bpmn:sequenceFlow id="Flow_1ut95vk" sourceRef="Activity_07iglj7" targetRef="Gateway_1lh8c45" />
<bpmn:sequenceFlow id="Flow_1fok0lz" name="a" sourceRef="Gateway_1lh8c45" targetRef="Activity_19ig0xo">
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">which_branch == 'a'</bpmn:conditionExpression>
</bpmn:sequenceFlow>
<bpmn:sequenceFlow id="Flow_01he29w" name="b" sourceRef="Gateway_1lh8c45" targetRef="Activity_1hx53cu">
<bpmn:sequenceFlow id="Flow_21he29w" name="b" sourceRef="Gateway_1lh8c45" targetRef="Activity_1hx53cu">
<bpmn:conditionExpression xsi:type="bpmn:tFormalExpression">which_branch == 'b'</bpmn:conditionExpression>
</bpmn:sequenceFlow>
<bpmn:exclusiveGateway id="Gateway_0ikuwt5">
@ -52,7 +52,7 @@
<camunda:formField id="FormField_1l30p68" label="Do you like pie?" type="boolean" />
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>Flow_01he29w</bpmn:incoming>
<bpmn:incoming>Flow_21he29w</bpmn:incoming>
<bpmn:outgoing>Flow_0ozlczo</bpmn:outgoing>
</bpmn:userTask>
<bpmn:userTask id="Activity_1b15riu" name="Enter Task 3" camunda:formKey="form_task3">
@ -88,12 +88,12 @@
<di:waypoint x="630" y="177" />
<di:waypoint x="685" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_01he29w_di" bpmnElement="Flow_01he29w">
<bpmndi:BPMNEdge id="Flow_01he29w_di" bpmnElement="Flow_21he29w">
<di:waypoint x="450" y="202" />
<di:waypoint x="450" y="290" />
<di:waypoint x="530" y="290" />
<bpmndi:BPMNLabel>
<dc:Bounds x="462" y="243" width="6" height="14" />
<dc:Bounds x="462" y="243" width="7" height="14" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1fok0lz_di" bpmnElement="Flow_1fok0lz">

@ -157,10 +157,12 @@ class TestStudyService(BaseTest):

    def test_get_all_studies(self):
        user = self.create_user_with_study_and_workflow()
        study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first()
        self.assertIsNotNone(study)

        # Add a document to the study with the correct code.
        workflow1 = self.create_workflow('docx')
        workflow2 = self.create_workflow('empty_workflow')
        workflow1 = self.create_workflow('docx', study=study)
        workflow2 = self.create_workflow('empty_workflow', study=study)

        # Add files to both workflows.
        FileService.add_workflow_file(workflow_id=workflow1.id,

@ -1,29 +1,73 @@
from tests.base_test import BaseTest
import json
from calendar import timegm
from datetime import timezone, datetime, timedelta

from crc import db
import jwt

from tests.base_test import BaseTest
from crc import db, app
from crc.api.common import ApiError
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudySchema, StudyModel
from crc.models.user import UserModel


class TestAuthentication(BaseTest):

    def test_auth_token(self):
        self.load_example_data()
        user = UserModel(uid="dhf8r")
        auth_token = user.encode_auth_token()
        self.assertTrue(isinstance(auth_token, bytes))
        self.assertEqual("dhf8r", user.decode_auth_token(auth_token).get("sub"))
    def tearDown(self):
        # Assure we set the production flag back to false.
        app.config['PRODUCTION'] = False
        super().tearDown()

    def test_backdoor_auth_creates_user(self):
        new_uid = 'lb3dp'  ## Assure this user id is in the fake responses from ldap.
    def test_auth_token(self):
        # Save the original timeout setting
        orig_ttl = float(app.config['TOKEN_AUTH_TTL_HOURS'])

        self.load_example_data()

        # Set the timeout to something else
        new_ttl = 4.0
        app.config['TOKEN_AUTH_TTL_HOURS'] = new_ttl
        user_1 = UserModel(uid="dhf8r")
        expected_exp_1 = timegm((datetime.utcnow() + timedelta(hours=new_ttl)).utctimetuple())
        auth_token_1 = user_1.encode_auth_token()
        self.assertTrue(isinstance(auth_token_1, bytes))
        self.assertEqual("dhf8r", user_1.decode_auth_token(auth_token_1).get("sub"))
        actual_exp_1 = user_1.decode_auth_token(auth_token_1).get("exp")
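        # Allow up to 1000 seconds of slack for any delay between computing the expected expiry and encoding the token.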
        self.assertTrue(expected_exp_1 - 1000 <= actual_exp_1 <= expected_exp_1 + 1000)

        # Set the timeout to a negative value, so the token is already expired when it is issued
        neg_ttl = -0.01
        app.config['TOKEN_AUTH_TTL_HOURS'] = neg_ttl
        user_2 = UserModel(uid="dhf8r")
        expected_exp_2 = timegm((datetime.utcnow() + timedelta(hours=neg_ttl)).utctimetuple())
        auth_token_2 = user_2.encode_auth_token()
        self.assertTrue(isinstance(auth_token_2, bytes))
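        # decode_auth_token should reject the expired token; the nested context managers allow for the raw
        # jwt.ExpiredSignatureError as well as the ApiError the service presumably wraps it in.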
        with self.assertRaises(ApiError) as api_error:
            with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
                user_2.decode_auth_token(auth_token_2)
        self.assertEqual(api_error.exception.status_code, 400, 'Should raise an API Error if token is expired')

        # Set the timeout back to where it was
        app.config['TOKEN_AUTH_TTL_HOURS'] = orig_ttl
        user_3 = UserModel(uid="dhf8r")
        expected_exp_3 = timegm((datetime.utcnow() + timedelta(hours=orig_ttl)).utctimetuple())
        auth_token_3 = user_3.encode_auth_token()
        self.assertTrue(isinstance(auth_token_3, bytes))
        actual_exp_3 = user_3.decode_auth_token(auth_token_3).get("exp")
        self.assertTrue(expected_exp_3 - 1000 <= actual_exp_3 <= expected_exp_3 + 1000)

    def test_non_production_auth_creates_user(self):
        new_uid = 'lb3dp'  ## Assure this user id is in the fake responses from ldap.
        self.load_example_data()
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
        self.assertIsNone(user)

        user_info = {'uid': new_uid, 'first_name': 'Cordi', 'last_name': 'Nator',
                     'email_address': 'czn1z@virginia.edu'}
        redirect_url = 'http://worlds.best.website/admin'
        query_string = self.user_info_to_query_string(user_info, redirect_url)
        url = '/v1.0/sso_backdoor%s' % query_string
        url = '/v1.0/login%s' % query_string
        rv_1 = self.app.get(url, follow_redirects=False)
        self.assertTrue(rv_1.status_code == 302)
        self.assertTrue(str.startswith(rv_1.location, redirect_url))
@ -38,22 +82,30 @@ class TestAuthentication(BaseTest):
        self.assertTrue(rv_2.status_code == 302)
        self.assertTrue(str.startswith(rv_2.location, redirect_url))

    def test_normal_auth_creates_user(self):
        new_uid = 'lb3dp'  # This user is in the test ldap system.
    def test_production_auth_creates_user(self):
        # Switch production mode on
        app.config['PRODUCTION'] = True

        self.load_example_data()
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()

        new_uid = 'lb3dp'  # This user is in the test ldap system.
        user = db.session.query(UserModel).filter_by(uid=new_uid).first()
        self.assertIsNone(user)
        redirect_url = 'http://worlds.best.website/admin'
        headers = dict(Uid=new_uid)
        db.session.flush()
        rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers)

        self.assert_success(rv)
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
        user = db.session.query(UserModel).filter_by(uid=new_uid).first()
        self.assertIsNotNone(user)
        self.assertEqual(new_uid, user.uid)
        self.assertEqual("Laura Barnes", user.display_name)
        self.assertEqual("lb3dp@virginia.edu", user.email_address)
        self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user.title)

        # Switch production mode back off
        app.config['PRODUCTION'] = False

    def test_current_user_status(self):
        self.load_example_data()
@ -67,3 +119,108 @@ class TestAuthentication(BaseTest):
        user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu')
        rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut'))
        self.assert_success(rv)

    def test_admin_can_access_admin_only_endpoints(self):
        # Switch production mode on
        app.config['PRODUCTION'] = True

        self.load_example_data()

        admin_uids = app.config['ADMIN_UIDS']
        self.assertGreater(len(admin_uids), 0)
        admin_uid = admin_uids[0]
        self.assertEqual(admin_uid, 'dhf8r')  # This user is in the test ldap system.
        admin_headers = dict(Uid=admin_uid)

        rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers)
        self.assert_success(rv)

        admin_user = db.session.query(UserModel).filter(UserModel.uid == admin_uid).first()
        self.assertIsNotNone(admin_user)
        self.assertEqual(admin_uid, admin_user.uid)

        admin_study = self._make_fake_study(admin_uid)

        admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode())

        rv_add_study = self.app.post(
            '/v1.0/study',
            content_type="application/json",
            headers=admin_token_headers,
            data=json.dumps(StudySchema().dump(admin_study)),
            follow_redirects=False
        )
        self.assert_success(rv_add_study, 'Admin user should be able to add a study')

        new_admin_study = json.loads(rv_add_study.get_data(as_text=True))
        db_admin_study = db.session.query(StudyModel).filter_by(id=new_admin_study['id']).first()
        self.assertIsNotNone(db_admin_study)

        rv_del_study = self.app.delete(
            '/v1.0/study/%i' % db_admin_study.id,
            follow_redirects=False,
            headers=admin_token_headers
        )
        self.assert_success(rv_del_study, 'Admin user should be able to delete a study')

        # Switch production mode back off
        app.config['PRODUCTION'] = False

    def test_nonadmin_cannot_access_admin_only_endpoints(self):
        # Switch production mode on
        app.config['PRODUCTION'] = True

        self.load_example_data()

        # Non-admin user should not be able to delete a study
        non_admin_uid = 'lb3dp'
        admin_uids = app.config['ADMIN_UIDS']
        self.assertGreater(len(admin_uids), 0)
        self.assertNotIn(non_admin_uid, admin_uids)

        non_admin_headers = dict(Uid=non_admin_uid)

        rv = self.app.get(
            'v1.0/login',
            follow_redirects=False,
            headers=non_admin_headers
        )
        self.assert_success(rv)

        non_admin_user = db.session.query(UserModel).filter_by(uid=non_admin_uid).first()
        self.assertIsNotNone(non_admin_user)

        non_admin_token_headers = dict(Authorization='Bearer ' + non_admin_user.encode_auth_token().decode())

        non_admin_study = self._make_fake_study(non_admin_uid)

        rv_add_study = self.app.post(
            '/v1.0/study',
            content_type="application/json",
            headers=non_admin_token_headers,
            data=json.dumps(StudySchema().dump(non_admin_study))
        )
        self.assert_success(rv_add_study, 'Non-admin user should be able to add a study')

        new_non_admin_study = json.loads(rv_add_study.get_data(as_text=True))
        db_non_admin_study = db.session.query(StudyModel).filter_by(id=new_non_admin_study['id']).first()
        self.assertIsNotNone(db_non_admin_study)

        rv_non_admin_del_study = self.app.delete(
            '/v1.0/study/%i' % db_non_admin_study.id,
            follow_redirects=False,
            headers=non_admin_token_headers
        )
        self.assert_failure(rv_non_admin_del_study, 401)

        # Switch production mode back off
        app.config['PRODUCTION'] = False

    def _make_fake_study(self, uid):
        return {
            "title": "blah",
            "last_updated": datetime.now(tz=timezone.utc),
            "protocol_builder_status": ProtocolBuilderStatus.ACTIVE,
            "primary_investigator_id": uid,
            "user_uid": uid,
        }

@ -4,85 +4,14 @@ import random
from unittest.mock import patch

from tests.base_test import BaseTest

from crc import session, app
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
from crc.models.file import FileModelSchema
from crc.models.stats import TaskEventModel
from crc.models.workflow import WorkflowStatus
from crc.services.workflow_service import WorkflowService


class TestTasksApi(BaseTest):

    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
                          (workflow.id, str(soft_reset), str(hard_reset)),
                          headers=self.logged_in_headers(),
                          content_type="application/json")
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        workflow_api = WorkflowApiSchema().load(json_data)
        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
        return workflow_api

    def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
        prev_completed_task_count = workflow_in.completed_tasks
        if isinstance(task_in, dict):
            task_id = task_in["id"]
        else:
            task_id = task_in.id
        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
                          headers=self.logged_in_headers(),
                          content_type="application/json",
                          data=json.dumps(dict_data))
        if error_code:
            self.assert_failure(rv, error_code=error_code)
            return

        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))

        # Assure stats are updated on the model
        workflow = WorkflowApiSchema().load(json_data)
        # The total number of tasks may change over time: as users move through gateways,
        # branches may be pruned, and new tasks may be created as we hit parallel multi-instance tasks...
        self.assertIsNotNone(workflow.total_tasks)
        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
        # Assure a record exists in the Task Events
        task_events = session.query(TaskEventModel) \
            .filter_by(workflow_id=workflow.id) \
            .filter_by(task_id=task_id) \
            .order_by(TaskEventModel.date.desc()).all()
        self.assertGreater(len(task_events), 0)
        event = task_events[0]
        self.assertIsNotNone(event.study_id)
        self.assertEqual("dhf8r", event.user_uid)
        self.assertEqual(workflow.id, event.workflow_id)
        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
        self.assertEqual(workflow.spec_version, event.spec_version)
        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
        self.assertEqual(task_in.id, task_id)
        self.assertEqual(task_in.name, event.task_name)
        self.assertEqual(task_in.title, event.task_title)
        self.assertEqual(task_in.type, event.task_type)
        self.assertEqual("COMPLETED", event.task_state)
        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
        if isinstance(task_in.multi_instance_type, MultiInstanceType):
            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
        else:
            self.assertEqual(task_in.multi_instance_type, event.mi_type)

        self.assertEqual(task_in.multi_instance_count, event.mi_count)
        self.assertEqual(task_in.multi_instance_index, event.mi_index)
        self.assertEqual(task_in.process_name, event.process_name)
        self.assertIsNotNone(event.date)

        workflow = WorkflowApiSchema().load(json_data)
        return workflow

    def test_get_current_user_tasks(self):
        self.load_example_data()
        workflow = self.create_workflow('random_fact')
@ -185,6 +114,7 @@ class TestTasksApi(BaseTest):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('docx')

        # get the first form in the two form workflow.
        task = self.get_workflow_api(workflow).next_task
        data = {
@ -203,6 +133,7 @@ class TestTasksApi(BaseTest):
        json_data = json.loads(rv.get_data(as_text=True))
        files = FileModelSchema(many=True).load(json_data, session=session)
        self.assertTrue(len(files) == 1)

        # Assure we can still delete the study even when there is a file attached to a workflow.
        rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
        self.assert_success(rv)