Merge branch 'rrt/dev' into rrt/testing

commit f3999ab362
@@ -13,7 +13,6 @@ addons:
   organization: "sartography"
 
 before_install:
-  - cp config/travis-testing.py config/testing.py
   - psql -c 'create database crc_test;' -U postgres
 
 install:

@@ -22,7 +21,10 @@ install:
   - pipenv install
 
 env:
+  - PB_BASE_URL='http://workflow.sartography.com:5001/pb/'
   global:
     - TESTING=true
+    - PB_ENABLED=false
     - SQLALCHEMY_DATABASE_URI="postgresql://postgres:@localhost:5432/crc_test"
 
 script:
   - pipenv run coverage run -m pytest
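With config/travis-testing.py gone (deleted further down in this diff), the build no longer copies a config file into place; config/testing.py now reads the same settings from the environment variables defined above. A minimal standalone sketch of that pattern, mirroring the config lines in this commit:

    from os import environ

    # Booleans arrive from CI as strings; the configs compare against "true".
    TESTING = environ.get('TESTING', default="false") == "true"
    PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
    DB_NAME = environ.get('DB_NAME', default="crc_test")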
@@ -1,15 +1,9 @@
-FROM python:3.7-slim
+FROM sartography/cr-connect-python-base
 
 WORKDIR /app
 COPY Pipfile Pipfile.lock /app/
 
 RUN set -xe \
-    && pip install pipenv \
     && apt-get update -q \
     && apt-get install -y -q \
-        gcc python3-dev libssl-dev \
-        curl postgresql-client git-core \
+        gunicorn3 postgresql-client \
     && pipenv install --dev \
-    && apt-get remove -y gcc python3-dev libssl-dev \
     && apt-get autoremove -y \
@@ -104,17 +104,17 @@
         },
         "celery": {
             "hashes": [
-                "sha256:9ae2e73b93cc7d6b48b56aaf49a68c91752d0ffd7dfdcc47f842ca79a6f13eae",
-                "sha256:c2037b6a8463da43b19969a0fc13f9023ceca6352b4dd51be01c66fbbb13647e"
+                "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647",
+                "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b"
             ],
-            "version": "==4.4.4"
+            "version": "==4.4.5"
         },
         "certifi": {
             "hashes": [
-                "sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304",
-                "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519"
+                "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1",
+                "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc"
             ],
-            "version": "==2020.4.5.1"
+            "version": "==2020.4.5.2"
         },
         "cffi": {
             "hashes": [

@@ -285,11 +285,11 @@
         },
         "flask-marshmallow": {
            "hashes": [
-                "sha256:6e6aec171b8e092e0eafaf035ff5b8637bf3a58ab46f568c4c1bab02f2a3c196",
-                "sha256:a1685536e7ab5abdc712bbc1ac1a6b0b50951a368502f7985e7d1c27b3c21e59"
+                "sha256:1da1e6454a56a3e15107b987121729f152325bdef23f3df2f9b52bbd074af38e",
+                "sha256:aefc1f1d96256c430a409f08241bab75ffe97e5d14ac5d1f000764e39bf4873a"
             ],
             "index": "pypi",
-            "version": "==0.12.0"
+            "version": "==0.13.0"
         },
         "flask-migrate": {
             "hashes": [

@@ -359,10 +359,10 @@
         },
         "inflection": {
             "hashes": [
-                "sha256:32a5c3341d9583ec319548b9015b7fbdf8c429cbcb575d326c33ae3a0e90d52c",
-                "sha256:9a15d3598f01220e93f2207c432cfede50daff53137ce660fb8be838ef1ca6cc"
+                "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9",
+                "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924"
             ],
-            "version": "==0.4.0"
+            "version": "==0.5.0"
         },
         "itsdangerous": {
             "hashes": [

@@ -751,11 +751,11 @@
         },
         "sphinx": {
             "hashes": [
-                "sha256:779a519adbd3a70fc7c468af08c5e74829868b0a5b34587b33340e010291856c",
-                "sha256:ea64df287958ee5aac46be7ac2b7277305b0381d213728c3a49d8bb9b8415807"
+                "sha256:1c445320a3310baa5ccb8d957267ef4a0fc930dc1234db5098b3d7af14fbb242",
+                "sha256:7d3d5087e39ab5a031b75588e9859f011de70e213cd0080ccbc28079fb0786d1"
             ],
             "index": "pypi",
-            "version": "==3.0.4"
+            "version": "==3.1.0"
         },
         "sphinxcontrib-applehelp": {
             "hashes": [

@@ -990,10 +990,10 @@
         },
         "wcwidth": {
             "hashes": [
-                "sha256:980fbf4f3c196c0f329cdcd1e84c554d6a211f18e252e525a0cf4223154a41d6",
-                "sha256:edbc2b718b4db6cdf393eefe3a420183947d6aa312505ce6754516f458ff8830"
+                "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f",
+                "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f"
             ],
-            "version": "==0.2.3"
+            "version": "==0.2.4"
         },
         "zipp": {
             "hashes": [
@@ -9,9 +9,10 @@ JSON_SORT_KEYS = False  # CRITICAL. Do not sort the data when returning values
 NAME = "CR Connect Workflow"
 FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default="5000")
 CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default="localhost:4200, localhost:5002"))
 DEVELOPMENT = environ.get('DEVELOPMENT', default="true") == "true"
 TESTING = environ.get('TESTING', default="false") == "true"
-PRODUCTION = (environ.get('PRODUCTION', default="false") == "true") or (not DEVELOPMENT and not TESTING)
+PRODUCTION = (environ.get('PRODUCTION', default="false") == "true")
 TEST_UID = environ.get('TEST_UID', default="dhf8r")
+ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah13us,cl3wf"))
 
 # Sentry flag
 ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true"

@@ -28,14 +29,14 @@ SQLALCHEMY_DATABASE_URI = environ.get(
     'SQLALCHEMY_DATABASE_URI',
     default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
 )
-TOKEN_AUTH_TTL_HOURS = int(environ.get('TOKEN_AUTH_TTL_HOURS', default=4))
+TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
 TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
 FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
 SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
 
 # %s/%i placeholders expected for uva_id and study_id in various calls.
 PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
-PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/pb/").strip('/') + '/'  # Trailing slash required
+PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/v2.0/").strip('/') + '/'  # Trailing slash required
 PB_USER_STUDIES_URL = environ.get('PB_USER_STUDIES_URL', default=PB_BASE_URL + "user_studies?uva_id=%s")
 PB_INVESTIGATORS_URL = environ.get('PB_INVESTIGATORS_URL', default=PB_BASE_URL + "investigators?studyid=%i")
 PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL + "required_docs?studyid=%i")

@@ -46,9 +47,10 @@ LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1))
 
 # Email configuration
+FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com']
 MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
 MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
 MAIL_PORT = environ.get('MAIL_PORT', default=2525)
 MAIL_USE_SSL = environ.get('MAIL_USE_SSL', default=False)
 MAIL_USE_TLS = environ.get('MAIL_USE_TLS', default=True)
-MAIL_USERNAME = environ.get('MAIL_USERNAME', default='5f012d0108d374')
-MAIL_PASSWORD = environ.get('MAIL_PASSWORD', default='08442c04e98d50')
+MAIL_USERNAME = environ.get('MAIL_USERNAME', default='')
+MAIL_PASSWORD = environ.get('MAIL_PASSWORD', default='')
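The PB_BASE_URL default moves from the /pb/ prefix to /v2.0/, and the strip('/') + '/' idiom guarantees exactly one trailing slash however the variable is set. A standalone illustration of that normalization (the URLs are examples only):

    def normalize(base_url):
        # Mirrors the config line: strip any trailing slash, then add exactly one.
        return base_url.strip('/') + '/'

    assert normalize("http://localhost:5001/v2.0") == "http://localhost:5001/v2.0/"
    assert normalize("http://localhost:5001/v2.0/") == "http://localhost:5001/v2.0/"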
@@ -4,16 +4,15 @@ from os import environ
 basedir = os.path.abspath(os.path.dirname(__file__))
 
 NAME = "CR Connect Workflow"
 DEVELOPMENT = True
 TESTING = True
 TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
-PB_ENABLED = False
 
+# This is here, for when we are running the E2E Tests in the frontend code bases.
+# which will set the TESTING envronment to true, causing this to execute, but we need
+# to respect the environment variables in that case.
+# when running locally the defaults apply, meaning we use crc_test for doing the tests
+# locally, and we don't over-write the database. Did you read this far? Have a cookie!
+PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
+DB_HOST = environ.get('DB_HOST', default="localhost")
+DB_PORT = environ.get('DB_PORT', default="5432")
+DB_NAME = environ.get('DB_NAME', default="crc_test")

@@ -23,8 +22,8 @@ SQLALCHEMY_DATABASE_URI = environ.get(
     'SQLALCHEMY_DATABASE_URI',
     default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
 )
+ADMIN_UIDS = ['dhf8r']
 
 print('### USING TESTING CONFIG: ###')
 print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
 print('DEVELOPMENT = ', DEVELOPMENT)
 print('TESTING = ', TESTING)
@@ -1,17 +0,0 @@
-import os
-basedir = os.path.abspath(os.path.dirname(__file__))
-
-NAME = "CR Connect Workflow"
-DEVELOPMENT = True
-TESTING = True
-SQLALCHEMY_DATABASE_URI = "postgresql://postgres:@localhost:5432/crc_test"
-TOKEN_AUTH_TTL_HOURS = 2
-TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
-FRONTEND_AUTH_CALLBACK = "http://localhost:4200/session"  # Not Required
-PB_ENABLED = False
-
-print('+++ USING TRAVIS TESTING CONFIG: +++')
-print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
-print('DEVELOPMENT = ', DEVELOPMENT)
-print('TESTING = ', TESTING)
-print('FRONTEND_AUTH_CALLBACK = ', FRONTEND_AUTH_CALLBACK)
@@ -57,15 +57,16 @@ env = Environment(loader=FileSystemLoader(template_dir))
 mail = Mail(app)
 
 print('=== USING THESE CONFIG SETTINGS: ===')
-print('DB_HOST = ', )
-print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
-print('DEVELOPMENT = ', app.config['DEVELOPMENT'])
-print('TESTING = ', app.config['TESTING'])
-print('PRODUCTION = ', app.config['PRODUCTION'])
-print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
-print('LDAP_URL = ', app.config['LDAP_URL'])
+print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
+print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
+print('DB_HOST = ', app.config['DB_HOST'])
+print('LDAP_URL = ', app.config['LDAP_URL'])
+print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
+print('PB_ENABLED = ', app.config['PB_ENABLED'])
+print('PRODUCTION = ', app.config['PRODUCTION'])
+print('TESTING = ', app.config['TESTING'])
+print('TEST_UID = ', app.config['TEST_UID'])
+print('ADMIN_UIDS = ', app.config['ADMIN_UIDS'])
 
 @app.cli.command()
 def load_example_data():
crc/api.yml
@@ -9,54 +9,18 @@ servers:
 security:
   - jwt: ['secret']
 paths:
-  /sso_backdoor:
+  /login:
     get:
-      operationId: crc.api.user.backdoor
-      summary: A backdoor that allows someone to log in as a specific user, if they
-        are in a staging environment.
+      operationId: crc.api.user.login
+      summary: In production, logs the user in via SSO. If not in production, logs in as a specific user for testing.
       security: []  # Disable security for this endpoint only.
       parameters:
         - name: uid
           in: query
           required: true
           schema:
             type: string
-        - name: email_address
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: display_name
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: affiliation
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: eppn
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: first_name
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: last_name
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: title
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: redirect
+        - name: redirect_url
           in: query
           required: false
           schema:
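In a non-production environment the new /login endpoint behaves like the old /sso_backdoor, so an end-to-end test can drive it directly. A hedged sketch (the base URL and uid are assumptions; the 302/Location expectations mirror tests/base_test.py further down):

    import requests

    # Hit the reworked endpoint; note the parameter is now redirect_url, not redirect.
    resp = requests.get(
        "http://localhost:5000/v1.0/login",  # assumed local dev server
        params={"uid": "dhf8r", "redirect_url": "http://localhost:4200/session"},
        allow_redirects=False,
    )

    # Outside production this logs in as the given user and redirects to the
    # frontend auth callback with the auth token appended.
    print(resp.status_code)              # expect 302
    print(resp.headers.get("Location"))  # starts with the redirect_url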
@@ -150,6 +114,8 @@ paths:
               $ref: "#/components/schemas/Study"
     delete:
       operationId: crc.api.study.delete_study
+      security:
+        - auth_admin: ['secret']
       summary: Removes the given study completely.
       tags:
         - Studies

@@ -251,6 +217,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpec"
     put:
       operationId: crc.api.workflow.update_workflow_specification
+      security:
+        - auth_admin: ['secret']
       summary: Modifies an existing workflow specification with the given parameters.
       tags:
         - Workflow Specifications

@@ -268,6 +236,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpec"
     delete:
       operationId: crc.api.workflow.delete_workflow_specification
+      security:
+        - auth_admin: ['secret']
       summary: Removes an existing workflow specification
       tags:
         - Workflow Specifications

@@ -313,6 +283,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpecCategory"
     post:
       operationId: crc.api.workflow.add_workflow_spec_category
+      security:
+        - auth_admin: ['secret']
       summary: Creates a new workflow spec category with the given parameters.
       tags:
         - Workflow Specification Category

@@ -350,6 +322,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpecCategory"
     put:
       operationId: crc.api.workflow.update_workflow_spec_category
+      security:
+        - auth_admin: ['secret']
       summary: Modifies an existing workflow spec category with the given parameters.
       tags:
         - Workflow Specification Category

@@ -367,6 +341,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpecCategory"
     delete:
       operationId: crc.api.workflow.delete_workflow_spec_category
+      security:
+        - auth_admin: ['secret']
       summary: Removes an existing workflow spec category
       tags:
         - Workflow Specification Category

@@ -566,6 +542,8 @@ paths:
             example: '<?xml version="1.0" encoding="UTF-8"?><bpmn:definitions></bpmn:definitions>'
     put:
       operationId: crc.api.file.set_reference_file
+      security:
+        - auth_admin: ['secret']
       summary: Update the contents of a named reference file.
       tags:
         - Files

@@ -624,6 +602,8 @@ paths:
               $ref: "#/components/schemas/Workflow"
     delete:
       operationId: crc.api.workflow.delete_workflow
+      security:
+        - auth_admin: ['secret']
       summary: Removes an existing workflow
       tags:
         - Workflows and Tasks

@@ -762,6 +742,26 @@ paths:
             text/plain:
               schema:
                 type: string
+  /send_email:
+    parameters:
+      - name: address
+        in: query
+        required: true
+        description: The address to send a test email to.
+        schema:
+          type: string
+    get:
+      operationId: crc.api.tools.send_email
+      summary: Sends an email so we can see if things work or not.
+      tags:
+        - Configurator Tools
+      responses:
+        '201':
+          description: Returns any error messages that might come back from sending the email.
+          content:
+            text/plain:
+              schema:
+                type: string
   /render_docx:
     put:
       operationId: crc.api.tools.render_docx

@@ -924,6 +924,11 @@
       scheme: bearer
       bearerFormat: JWT
       x-bearerInfoFunc: crc.api.user.verify_token
+    auth_admin:
+      type: http
+      scheme: bearer
+      bearerFormat: JWT
+      x-bearerInfoFunc: crc.api.user.verify_token_admin
   schemas:
     User:
       properties:
@@ -24,7 +24,6 @@ def get_approval_counts(as_user=None):
         .all()
 
     study_ids = [a.study_id for a in db_user_approvals]
-    print('study_ids', study_ids)
 
     db_other_approvals = db.session.query(ApprovalModel)\
         .filter(ApprovalModel.study_id.in_(study_ids))\

@@ -39,8 +38,8 @@ def get_approval_counts(as_user=None):
         other_approvals[approval.study_id] = approval
 
     counts = {}
-    for status in ApprovalStatus:
-        counts[status.name] = 0
+    for name, value in ApprovalStatus.__members__.items():
+        counts[name] = 0
 
     for approval in db_user_approvals:
         # Check if another approval has the same study id

@@ -57,6 +56,8 @@ def get_approval_counts(as_user=None):
                 counts[ApprovalStatus.CANCELED.name] += 1
             elif other_approval.status == ApprovalStatus.APPROVED.name:
                 counts[approval.status] += 1
+            else:
+                counts[approval.status] += 1
         else:
             counts[approval.status] += 1
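Iterating ApprovalStatus.__members__ yields the same names as iterating the enum directly, but makes explicit that the keys of counts are plain strings, matching the string status stored on each approval. A standalone sketch (the enum values here are illustrative stand-ins for crc.models.approval.ApprovalStatus):

    from enum import Enum

    class ApprovalStatus(Enum):
        # Illustrative stand-in, not the real model enum
        PENDING = 'PENDING'
        APPROVED = 'APPROVED'
        DECLINED = 'DECLINED'
        CANCELED = 'CANCELED'

    counts = {}
    for name, value in ApprovalStatus.__members__.items():
        counts[name] = 0  # string keys, so counts[approval.status] works directly

    print(counts)  # {'PENDING': 0, 'APPROVED': 0, 'DECLINED': 0, 'CANCELED': 0}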
@@ -9,6 +9,8 @@ from crc.api.common import ApiError
 from crc.scripts.complete_template import CompleteTemplate
 from crc.scripts.script import Script
 import crc.scripts
+from crc.services.mails import send_test_email
 
 
 def render_markdown(data, template):
     """

@@ -59,3 +61,8 @@ def list_scripts():
         })
     return script_meta
 
+
+def send_email(address):
+    """Just sends a quick test email to assure the system is working."""
+    if not address:
+        address = "dan@sartography.com"
+    return send_test_email(address, [address])
crc/api/user.py
@@ -10,29 +10,113 @@ from crc.services.ldap_service import LdapService, LdapModel
 .. module:: crc.api.user
    :synopsis: Single Sign On (SSO) user login and session handlers
 """
-def verify_token(token):
-    failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate", status_code=403)
-    if (not 'PRODUCTION' in app.config or not app.config['PRODUCTION']) and token == app.config["SWAGGER_AUTH_KEY"]:
+
+
+def verify_token(token=None):
+    """
+    Verifies the token for the user (if provided). If in production environment and token is not provided,
+    gets user from the SSO headers and returns their token.
+
+    Args:
+        token: Optional[str]
+
+    Returns:
+        token: str
+
+    Raises:
+        ApiError.  If not on production and token is not valid, returns an 'invalid_token' 403 error.
+                   If on production and user is not authenticated, returns a 'no_user' 403 error.
+    """
+
+    failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate",
+                             status_code=403)
+
+    if not _is_production() and (token is None or 'user' not in g):
         g.user = UserModel.query.first()
         token = g.user.encode_auth_token()
 
-    try:
-        token_info = UserModel.decode_auth_token(token)
-        g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
-    except:
-        raise failure_error
-    if g.user is not None:
-        return token_info
-    else:
-        raise failure_error
+    if token:
+        try:
+            token_info = UserModel.decode_auth_token(token)
+            g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
+        except:
+            raise failure_error
+        if g.user is not None:
+            return token_info
+        else:
+            raise failure_error
+
+    # If there's no token and we're in production, get the user from the SSO headers and return their token
+    if not token and _is_production():
+        uid = _get_request_uid(request)
+
+        if uid is not None:
+            db_user = UserModel.query.filter_by(uid=uid).first()
+
+            if db_user is not None:
+                g.user = db_user
+                token = g.user.encode_auth_token().decode()
+                token_info = UserModel.decode_auth_token(token)
+                return token_info
+
+            else:
+                raise ApiError("no_user", "User not found. Please login via the frontend app before accessing this feature.",
+                               status_code=403)
+
+
+def verify_token_admin(token=None):
+    """
+    Verifies the token for the user (if provided) in non-production environment. If in production environment,
+    checks that the user is in the list of authorized admins
+
+    Args:
+        token: Optional[str]
+
+    Returns:
+        token: str
+    """
+
+    # If this is production, check that the user is in the list of admins
+    if _is_production():
+        uid = _get_request_uid(request)
+
+        if uid is not None and uid in app.config['ADMIN_UIDS']:
+            return verify_token()
+
+    # If we're not in production, just use the normal verify_token method
+    else:
+        return verify_token(token)
 
 
 def get_current_user():
     return UserModelSchema().dump(g.user)
 
-@app.route('/v1.0/login')
-def sso_login():
-    # This what I see coming back:
+
+def login(
+        uid=None,
+        redirect_url=None,
+):
+    """
+    In non-production environment, provides an endpoint for end-to-end system testing that allows the system
+    to simulate logging in as a specific user. In production environment, simply logs user in via single-sign-on
+    (SSO) Shibboleth authentication headers.
+
+    Args:
+        uid: Optional[str]
+        redirect_url: Optional[str]
+
+    Returns:
+        str.  If not on production, returns the frontend auth callback URL, with auth token appended.
+              If on production and user is authenticated via SSO, returns the frontend auth callback URL,
+              with auth token appended.
+
+    Raises:
+        ApiError.  If on production and user is not authenticated, returns a 404 error.
+    """
+
+    # ----------------------------------------
+    # Shibboleth Authentication Headers
+    # ----------------------------------------
     # X-Remote-Cn: Daniel Harold Funk (dhf8r)
     # X-Remote-Sn: Funk
     # X-Remote-Givenname: Daniel

@@ -47,59 +131,52 @@ def sso_login():
     # X-Forwarded-Host: dev.crconnect.uvadcos.io
     # X-Forwarded-Server: dev.crconnect.uvadcos.io
     # Connection: Keep-Alive
-    uid = request.headers.get("Uid")
-    if not uid:
-        uid = request.headers.get("X-Remote-Uid")
-
-    if not uid:
-        raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
-                       % str(request.headers))
-
-    redirect = request.args.get('redirect')
-    app.logger.info("SSO_LOGIN: Full URL: " + request.url)
-    app.logger.info("SSO_LOGIN: User Id: " + uid)
-    app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect))
-    info = LdapService.user_info(uid)
-    return _handle_login(info, redirect)
+    # If we're in production, override any uid with the uid from the SSO request headers
+    if _is_production():
+        uid = _get_request_uid(request)
+
+    if uid:
+        app.logger.info("SSO_LOGIN: Full URL: " + request.url)
+        app.logger.info("SSO_LOGIN: User Id: " + uid)
+        app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect_url))
+
+        ldap_info = LdapService().user_info(uid)
+
+        if ldap_info:
+            return _handle_login(ldap_info, redirect_url)
+
+    raise ApiError('404', 'unknown')
 
 
 @app.route('/sso')
 def sso():
     response = ""
     response += "<h1>Headers</h1>"
     response += "<ul>"
-    for k,v in request.headers:
+    for k, v in request.headers:
         response += "<li><b>%s</b> %s</li>\n" % (k, v)
     response += "<h1>Environment</h1>"
-    for k,v in request.environ:
+    for k, v in request.environ:
         response += "<li><b>%s</b> %s</li>\n" % (k, v)
     return response
 
 
-def _handle_login(user_info: LdapModel, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
-    """On successful login, adds user to database if the user is not already in the system,
-       then returns the frontend auth callback URL, with auth token appended.
+def _handle_login(user_info: LdapModel, redirect_url=None):
+    """
+    On successful login, adds user to database if the user is not already in the system,
+    then returns the frontend auth callback URL, with auth token appended.
 
-       Args:
-           user_info - an ldap user_info object.
-           redirect_url: Optional[str]
+    Args:
+        user_info - an ldap user_info object.
+        redirect_url: Optional[str]
 
-       Returns:
-           Response.  302 - Redirects to the frontend auth callback URL, with auth token appended.
+    Returns:
+        Response.  302 - Redirects to the frontend auth callback URL, with auth token appended.
     """
-    user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
-
-    if user is None:
-        # Add new user
-        user = UserModel()
-
-    user.uid = user_info.uid
-    user.display_name = user_info.display_name
-    user.email_address = user_info.email_address
-    user.affiliation = user_info.affiliation
-    user.title = user_info.title
-
-    db.session.add(user)
-    db.session.commit()
+    user = _upsert_user(user_info)
     g.user = user
 
     # Return the frontend auth callback URL, with auth token appended.
     auth_token = user.encode_auth_token().decode()

@@ -114,41 +191,44 @@ def _handle_login(user_info: LdapModel, redirect_url=app.config['FRONTEND_AUTH_C
     return auth_token
 
 
-def backdoor(
-        uid=None,
-        affiliation=None,
-        display_name=None,
-        email_address=None,
-        eppn=None,
-        first_name=None,
-        last_name=None,
-        title=None,
-        redirect=None,
-):
-    """A backdoor for end-to-end system testing that allows the system to simulate logging in as a specific user.
-       Only works if the application is running in a non-production environment.
-
-       Args:
-           uid: str
-           affiliation: Optional[str]
-           display_name: Optional[str]
-           email_address: Optional[str]
-           eppn: Optional[str]
-           first_name: Optional[str]
-           last_name: Optional[str]
-           title: Optional[str]
-           redirect_url: Optional[str]
-
-       Returns:
-           str.  If not on production, returns the frontend auth callback URL, with auth token appended.
-
-       Raises:
-           ApiError.  If on production, returns a 404 error.
-    """
-    if not 'PRODUCTION' in app.config or not app.config['PRODUCTION']:
-
-        ldap_info = LdapService.user_info(uid)
-        return _handle_login(ldap_info, redirect)
-
-    else:
-        raise ApiError('404', 'unknown')
+def _upsert_user(user_info):
+    user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
+
+    if user is None:
+        # Add new user
+        user = UserModel()
+    else:
+        user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).with_for_update().first()
+
+    user.uid = user_info.uid
+    user.display_name = user_info.display_name
+    user.email_address = user_info.email_address
+    user.affiliation = user_info.affiliation
+    user.title = user_info.title
+
+    db.session.add(user)
+    db.session.commit()
+    return user
+
+
+def _get_request_uid(req):
+    uid = None
+
+    if _is_production():
+
+        if 'user' in g and g.user is not None:
+            return g.user.uid
+
+        uid = req.headers.get("Uid")
+        if not uid:
+            uid = req.headers.get("X-Remote-Uid")
+
+        if not uid:
+            raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
+                           % str(req.headers))
+
+    return uid
+
+
+def _is_production():
+    return 'PRODUCTION' in app.config and app.config['PRODUCTION']
@@ -1,6 +1,8 @@
 import uuid
 
-from crc import session
+from flask import g
+
+from crc import session, app
 from crc.api.common import ApiError, ApiErrorSchema
 from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
 from crc.models.file import FileModel, LookupDataSchema

@@ -129,7 +131,7 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
         workflow_spec_id=processor.workflow_spec_id,
         spec_version=processor.get_version_string(),
         is_latest_spec=processor.is_latest_spec,
-        total_tasks=processor.workflow_model.total_tasks,
+        total_tasks=len(navigation),
         completed_tasks=processor.workflow_model.completed_tasks,
         last_updated=processor.workflow_model.last_updated,
         title=spec.display_name

@@ -156,6 +158,7 @@ def delete_workflow(workflow_id):
 
 def set_current_task(workflow_id, task_id):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)

@@ -167,13 +170,21 @@ def set_current_task(workflow_id, task_id):
     if task.state == task.COMPLETED:
         task.reset_token(reset_data=False)  # we could optionally clear the previous data.
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
     workflow_api_model = __get_workflow_api_model(processor, task)
     return WorkflowApiSchema().dump(workflow_api_model)
 
 
 def update_task(workflow_id, task_id, body):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+
+    if workflow_model is None:
+        raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
+
+    elif workflow_model.study is None:
+        raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
+
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)

@@ -184,7 +195,7 @@ def update_task(workflow_id, task_id, body):
     processor.complete_task(task)
     processor.do_engine_steps()
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)
 
     workflow_api_model = __get_workflow_api_model(processor)
     return WorkflowApiSchema().dump(workflow_api_model)

@@ -239,3 +250,14 @@ def lookup(workflow_id, field_id, query, limit):
     workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
     lookup_data = LookupService.lookup(workflow, field_id, query, limit)
     return LookupDataSchema(many=True).dump(lookup_data)
+
+
+def __get_user_uid(user_uid):
+    if 'user' in g:
+        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
+            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
+        else:
+            return g.user.uid
+
+    else:
+        raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
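The new __get_user_uid helper centralizes the admin-or-owner rule used by set_current_task and update_task. Reduced to its essence (the ADMIN_UIDS value below is illustrative; the app reads it from app.config['ADMIN_UIDS']):

    ADMIN_UIDS = ['dhf8r']  # illustrative stand-in for the configured admin list

    def may_edit(current_uid, owner_uid):
        # Admins may edit any workflow's task data; everyone else only their own.
        return current_uid in ADMIN_UIDS or current_uid == owner_uid

    assert may_edit('dhf8r', 'ajl2j')      # admin acting on someone else's study
    assert may_edit('lb3dp', 'lb3dp')      # owner acting on their own study
    assert not may_edit('lb3dp', 'ajl2j')  # anyone else is rejected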
@@ -1,6 +1,6 @@
 from flask_marshmallow.sqla import SQLAlchemyAutoSchema
 from marshmallow import EXCLUDE
-from sqlalchemy import func, inspect
+from sqlalchemy import func
 
 from crc import db
@@ -19,7 +19,7 @@ class UserModel(db.Model):
     last_name = db.Column(db.String, nullable=True)
     title = db.Column(db.String, nullable=True)
 
-    # Add Department and School
+    # TODO: Add Department and School
 
 
     def encode_auth_token(self):

@@ -27,7 +27,7 @@ class UserModel(db.Model):
         Generates the Auth Token
         :return: string
         """
-        hours = int(app.config['TOKEN_AUTH_TTL_HOURS'])
+        hours = float(app.config['TOKEN_AUTH_TTL_HOURS'])
         payload = {
             'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=hours, minutes=0, seconds=0),
             'iat': datetime.datetime.utcnow(),

@@ -36,7 +36,7 @@ class UserModel(db.Model):
         return jwt.encode(
             payload,
             app.config.get('TOKEN_AUTH_SECRET_KEY'),
-            algorithm='HS256'
+            algorithm='HS256',
         )
 
     @staticmethod

@@ -50,9 +50,9 @@ class UserModel(db.Model):
             payload = jwt.decode(auth_token, app.config.get('TOKEN_AUTH_SECRET_KEY'), algorithms='HS256')
             return payload
         except jwt.ExpiredSignatureError:
-            raise ApiError('token_expired', 'The Authentication token you provided expired, and must be renewed.')
+            raise ApiError('token_expired', 'The Authentication token you provided expired and must be renewed.')
         except jwt.InvalidTokenError:
-            raise ApiError('token_invalid', 'The Authentication token you provided. You need a new token. ')
+            raise ApiError('token_invalid', 'The Authentication token you provided is invalid. You need a new token. ')
 
 
 class UserModelSchema(SQLAlchemyAutoSchema):
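Reading TOKEN_AUTH_TTL_HOURS as a float lets the token TTL be a fraction of an hour, since datetime.timedelta accepts non-integer hours. A standalone PyJWT round trip mirroring the model code above (the secret and uid are placeholders, not real values):

    import datetime
    import jwt  # PyJWT, the library UserModel uses

    SECRET = 'not-the-real-secret'  # stands in for app.config['TOKEN_AUTH_SECRET_KEY']
    hours = 0.5                     # a fractional TTL, now possible with float()

    payload = {
        'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=hours),
        'iat': datetime.datetime.utcnow(),
        'sub': 'dhf8r',
    }
    token = jwt.encode(payload, SECRET, algorithm='HS256')
    decoded = jwt.decode(token, SECRET, algorithms=['HS256'])
    assert decoded['sub'] == 'dhf8r'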
@@ -129,21 +129,25 @@ class ApprovalService(object):
             pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
             approver_info = ldap_service.user_info(approver_uid)
             # send rrp submission
-            send_ramp_up_approved_email(
+            mail_result = send_ramp_up_approved_email(
                 'askresearch@virginia.edu',
                 [pi_user_info.email_address],
                 f'{approver_info.display_name} - ({approver_info.uid})'
             )
+            if mail_result:
+                app.logger.error(mail_result)
         elif status == ApprovalStatus.DECLINED.value:
             ldap_service = LdapService()
             pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
             approver_info = ldap_service.user_info(approver_uid)
             # send rrp submission
-            send_ramp_up_denied_email(
+            mail_result = send_ramp_up_denied_email(
                 'askresearch@virginia.edu',
                 [pi_user_info.email_address],
                 f'{approver_info.display_name} - ({approver_info.uid})'
             )
+            if mail_result:
+                app.logger.error(mail_result)
             first_approval = ApprovalModel().query.filter_by(
                 study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
                 status=ApprovalStatus.APPROVED.value, version=db_approval.version).first()

@@ -152,12 +156,14 @@ class ApprovalService(object):
                 first_approver_info = ldap_service.user_info(first_approval.approver_uid)
                 approver_email = [first_approver_info.email_address] if first_approver_info.email_address else app.config['FALLBACK_EMAILS']
                 # send rrp denied by second approver email to first approver
-                send_ramp_up_denied_email_to_approver(
+                mail_result = send_ramp_up_denied_email_to_approver(
                     'askresearch@virginia.edu',
                     approver_email,
                     f'{pi_user_info.display_name} - ({pi_user_info.uid})',
                     f'{approver_info.display_name} - ({approver_info.uid})'
                 )
+                if mail_result:
+                    app.logger.error(mail_result)
         # TODO: Log update action by approver_uid - maybe ?
         return db_approval
 

@@ -222,19 +228,23 @@ class ApprovalService(object):
         pi_user_info = ldap_service.user_info(model.study.primary_investigator_id)
         approver_info = ldap_service.user_info(approver_uid)
         # send rrp submission
-        send_ramp_up_submission_email(
+        mail_result = send_ramp_up_submission_email(
             'askresearch@virginia.edu',
             [pi_user_info.email_address],
             f'{approver_info.display_name} - ({approver_info.uid})'
         )
+        if mail_result:
+            app.logger.error(mail_result)
         # send rrp approval request for first approver
         # enhance the second part in case it bombs
         approver_email = [approver_info.email_address] if approver_info.email_address else app.config['FALLBACK_EMAILS']
-        send_ramp_up_approval_request_first_review_email(
+        mail_result = send_ramp_up_approval_request_first_review_email(
             'askresearch@virginia.edu',
             approver_email,
             f'{pi_user_info.display_name} - ({pi_user_info.uid})'
         )
+        if mail_result:
+            app.logger.error(mail_result)
 
     @staticmethod
     def _create_approval_files(workflow_data_files, approval):
@@ -5,12 +5,29 @@ from flask_mail import Message
 
 
+# TODO: Extract common mailing code into its own function
+def send_test_email(sender, recipients):
+    try:
+        msg = Message('Research Ramp-up Plan test',
+                      sender=sender,
+                      recipients=recipients)
+        from crc import env, mail
+        template = env.get_template('ramp_up_approval_request_first_review.txt')
+        template_vars = {'primary_investigator': "test"}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_approval_request_first_review.html')
+        msg.html = template.render(template_vars)
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
 def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None):
     try:
         msg = Message('Research Ramp-up Plan Submitted',
                       sender=sender,
-                      recipients=recipients)
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
         from crc import env, mail
         template = env.get_template('ramp_up_submission.txt')
         template_vars = {'approver_1': approver_1, 'approver_2': approver_2}

@@ -26,7 +43,8 @@ def send_ramp_up_approval_request_email(sender, recipients, primary_investigator
     try:
         msg = Message('Research Ramp-up Plan Approval Request',
                       sender=sender,
-                      recipients=recipients)
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
         from crc import env, mail
         template = env.get_template('ramp_up_approval_request.txt')
         template_vars = {'primary_investigator': primary_investigator}

@@ -42,7 +60,8 @@ def send_ramp_up_approval_request_first_review_email(sender, recipients, primary
     try:
         msg = Message('Research Ramp-up Plan Approval Request',
                       sender=sender,
-                      recipients=recipients)
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
         from crc import env, mail
         template = env.get_template('ramp_up_approval_request_first_review.txt')
         template_vars = {'primary_investigator': primary_investigator}

@@ -58,7 +77,8 @@ def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None)
     try:
         msg = Message('Research Ramp-up Plan Approved',
                       sender=sender,
-                      recipients=recipients)
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
 
         from crc import env, mail
         template = env.get_template('ramp_up_approved.txt')

@@ -75,7 +95,8 @@ def send_ramp_up_denied_email(sender, recipients, approver):
     try:
         msg = Message('Research Ramp-up Plan Denied',
                       sender=sender,
-                      recipients=recipients)
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
 
         from crc import env, mail
         template = env.get_template('ramp_up_denied.txt')

@@ -92,7 +113,8 @@ def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigat
     try:
         msg = Message('Research Ramp-up Plan Denied',
                       sender=sender,
-                      recipients=recipients)
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
 
         from crc import env, mail
         template = env.get_template('ramp_up_denied_first_approver.txt')
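With send_test_email wired to the new /send_email path in crc/api.yml, the mail pipeline can be smoke-tested end to end. A hedged sketch (host, port, and token are assumptions):

    import requests

    resp = requests.get(
        "http://localhost:5000/v1.0/send_email",          # assumed local dev server
        params={"address": "dan@sartography.com"},
        headers={"Authorization": "Bearer <jwt-token>"},  # the path sits behind jwt security
    )

    # Per the spec, a 201 comes back with any mail errors as plain text in the body.
    print(resp.status_code, resp.text)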
@@ -1,4 +1,5 @@
 import json
+from json import JSONDecodeError
 from typing import List, Optional
 
 import requests

@@ -26,10 +27,17 @@ class ProtocolBuilderService(object):
         ProtocolBuilderService.__enabled_or_raise()
         if not isinstance(user_id, str):
             raise ApiError("protocol_builder_error", "This user id is invalid: " + str(user_id))
-        response = requests.get(ProtocolBuilderService.STUDY_URL % user_id)
+        url = ProtocolBuilderService.STUDY_URL % user_id
+        response = requests.get(url)
         if response.ok and response.text:
-            pb_studies = ProtocolBuilderStudySchema(many=True).loads(response.text)
-            return pb_studies
+            try:
+                pb_studies = ProtocolBuilderStudySchema(many=True).loads(response.text)
+                return pb_studies
+            except JSONDecodeError as err:
+                raise ApiError("protocol_builder_error",
+                               "Received an invalid response from the protocol builder. The response is not "
+                               "valid json. Url: %s, Response: %s, error: %s" %
+                               (url, response.text, err.msg))
         else:
             raise ApiError("protocol_builder_error",
                            "Received an invalid response from the protocol builder (status %s): %s" %
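The pattern being introduced, in isolation: parse the response body inside a try/except so a non-JSON reply from the Protocol Builder surfaces as a clear, contextual error instead of an unhandled exception. A standalone sketch with an illustrative URL:

    import json
    from json import JSONDecodeError

    import requests

    url = "http://localhost:5001/v2.0/user_studies?uva_id=dhf8r"  # illustrative endpoint
    response = requests.get(url)
    if response.ok and response.text:
        try:
            data = json.loads(response.text)
        except JSONDecodeError as err:
            # Report what was asked, what came back, and why parsing failed.
            raise RuntimeError("Invalid JSON from %s: %s (%s)" % (url, response.text, err.msg))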
@@ -86,8 +86,8 @@ class StudyService(object):
     def delete_workflow(workflow):
         for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
             FileService.delete_file(file.id)
-        for deb in workflow.dependencies:
-            session.delete(deb)
+        for dep in workflow.dependencies:
+            session.delete(dep)
         session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
         session.query(WorkflowModel).filter_by(id=workflow.id).delete()
 

@@ -133,7 +133,7 @@ class StudyService(object):
         that is available.."""
 
         # Get PB required docs, if Protocol Builder Service is enabled.
-        if ProtocolBuilderService.is_enabled():
+        if ProtocolBuilderService.is_enabled() and study_id is not None:
             try:
                 pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
             except requests.exceptions.ConnectionError as ce:
@@ -58,7 +58,7 @@ class WorkflowService(object):
 
     @staticmethod
     def delete_test_data():
-        for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
+        for study in db.session.query(StudyModel).filter(StudyModel.user_uid == "test"):
             StudyService.delete_study(study.id)
         db.session.commit()
 

@@ -318,12 +318,12 @@ class WorkflowService(object):
             field.options.append({"id": d.value, "name": d.label})
 
     @staticmethod
-    def log_task_action(processor, spiff_task, action):
+    def log_task_action(user_uid, processor, spiff_task, action):
         task = WorkflowService.spiff_task_to_api_task(spiff_task)
         workflow_model = processor.workflow_model
         task_event = TaskEventModel(
             study_id=workflow_model.study_id,
-            user_uid=g.user.uid,
+            user_uid=user_uid,
             workflow_id=workflow_model.id,
             workflow_spec_id=workflow_model.workflow_spec_id,
             spec_version=processor.get_version_string(),
@@ -1 +1,2 @@
-<p>A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your [Research Ramp-up Toolkit](https://rrt.uvadcos.io/app/approvals).</p>
+<p>A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+    <a href="https://rrt.uvadcos.io/app/approvals">Research Ramp-up Toolkit]</a></p>

@@ -1 +1,2 @@
-A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your [Research Ramp-up Toolkit](https://rrt.uvadcos.io/app/approvals).
+A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+Research Ramp-up Toolkit: https://rrt.uvadcos.io/app/approvals.

@@ -1 +1,2 @@
-<p>A Research Ramp-up approval request from {{ primary_investigator }} and is now available for your review in your [Research Ramp-up Toolkit](https://rrt.uvadcos.io/app/approvals).</p>
+<p>A Research Ramp-up approval request from {{ primary_investigator }} and is now available for your review in your
+    <a href="https://rrt.uvadcos.io/app/approvals">Research Ramp-up Toolkit</a>.</p>

@@ -1 +1,2 @@
-A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your [Research Ramp-up Toolkit](https://rrt.uvadcos.io/app/approvals).
+A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+Research Ramp-up Toolkit at https://rrt.uvadcos.io/app/approvals.
@@ -2,24 +2,27 @@
 # IMPORTANT - Environment must be loaded before app, models, etc....
 import os
 
-from sqlalchemy import Sequence
-
 os.environ["TESTING"] = "true"
 
 import json
 import unittest
 import urllib.parse
+import datetime
 
-from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.study import StudyModel
-from crc.services.file_service import FileService
-from crc.services.study_service import StudyService
-from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
-from crc.models.user import UserModel
+from flask import g
+from sqlalchemy import Sequence
 
 from crc import app, db, session
+from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.stats import TaskEventModel
+from crc.models.study import StudyModel
+from crc.models.user import UserModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
+from crc.services.workflow_service import WorkflowService
 from example_data import ExampleDataLoader
 
 #UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES

@@ -95,7 +98,7 @@ class BaseTest(unittest.TestCase):
 
     def tearDown(self):
         ExampleDataLoader.clean_db()
         session.flush()
+        g.user = None
         self.auths = {}
 

@@ -107,12 +110,16 @@ class BaseTest(unittest.TestCase):
             user_info = {'uid': user.uid}
 
         query_string = self.user_info_to_query_string(user_info, redirect_url)
-        rv = self.app.get("/v1.0/sso_backdoor%s" % query_string, follow_redirects=False)
+        rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)
         self.assertTrue(rv.status_code == 302)
         self.assertTrue(str.startswith(rv.location, redirect_url))
 
         user_model = session.query(UserModel).filter_by(uid=uid).first()
         self.assertIsNotNone(user_model.display_name)
         self.assertEqual(user_model.uid, uid)
+        self.assertTrue('user' in g, 'User should be in Flask globals')
+        self.assertEqual(uid, g.user.uid, 'Logged in user should match given user uid')
 
         return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
 

@@ -159,6 +166,7 @@ class BaseTest(unittest.TestCase):
     @staticmethod
     def load_test_spec(dir_name, master_spec=False, category_id=None):
         """Loads a spec into the database based on a directory in /tests/data"""
+
         if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
             return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
         filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")

@@ -198,7 +206,7 @@ class BaseTest(unittest.TestCase):
         for key, value in items:
             query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))
 
-        query_string_list.append('redirect=%s' % redirect_url)
+        query_string_list.append('redirect_url=%s' % redirect_url)
 
         return '?%s' % '&'.join(query_string_list)
 

@@ -222,12 +230,12 @@ class BaseTest(unittest.TestCase):
         db.session.commit()
         return user
 
-    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer"):
-        study = session.query(StudyModel).first()
+    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"):
+        study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
         if study is None:
             user = self.create_user(uid=uid)
             study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
-                               user_uid=user.uid, primary_investigator_id='lb3dp')
+                               user_uid=user.uid, primary_investigator_id=primary_investigator_id)
             db.session.add(study)
             db.session.commit()
         return study

@@ -249,3 +257,97 @@ class BaseTest(unittest.TestCase):
                                      binary_data=file.read(),
                                      content_type=CONTENT_TYPES['xls'])
         file.close()
+
+    def create_approval(
+            self,
+            study=None,
+            workflow=None,
+            approver_uid=None,
+            status=None,
+            version=None,
+    ):
+        study = study or self.create_study()
+        workflow = workflow or self.create_workflow()
+        approver_uid = approver_uid or self.test_uid
+        status = status or ApprovalStatus.PENDING.value
+        version = version or 1
+        approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version)
+        db.session.add(approval)
+        db.session.commit()
+        return approval
+
+    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"):
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
+                          (workflow.id, str(soft_reset), str(hard_reset)),
+                          headers=self.logged_in_headers(user),
+                          content_type="application/json")
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+        workflow_api = WorkflowApiSchema().load(json_data)
+        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
+        return workflow_api
+
+    def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"):
+        prev_completed_task_count = workflow_in.completed_tasks
+        if isinstance(task_in, dict):
+            task_id = task_in["id"]
+        else:
+            task_id = task_in.id
+
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
+                          headers=self.logged_in_headers(user=user),
+                          content_type="application/json",
+                          data=json.dumps(dict_data))
+        if error_code:
+            self.assert_failure(rv, error_code=error_code)
+            return
+
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+
+        # Assure stats are updated on the model
+        workflow = WorkflowApiSchema().load(json_data)
+        # The total number of tasks may change over time, as users move through gateways
+        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
+        self.assertIsNotNone(workflow.total_tasks)
+        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
+
+        # Assure a record exists in the Task Events
+        task_events = session.query(TaskEventModel) \
+            .filter_by(workflow_id=workflow.id) \
+            .filter_by(task_id=task_id) \
+            .order_by(TaskEventModel.date.desc()).all()
+        self.assertGreater(len(task_events), 0)
+        event = task_events[0]
+        self.assertIsNotNone(event.study_id)
+        self.assertEqual(user_uid, event.user_uid)
+        self.assertEqual(workflow.id, event.workflow_id)
+        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
+        self.assertEqual(workflow.spec_version, event.spec_version)
+        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
+        self.assertEqual(task_in.id, task_id)
+        self.assertEqual(task_in.name, event.task_name)
+        self.assertEqual(task_in.title, event.task_title)
+        self.assertEqual(task_in.type, event.task_type)
+        self.assertEqual("COMPLETED", event.task_state)
+
+        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
+        if isinstance(task_in.multi_instance_type, MultiInstanceType):
+            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
+        else:
+            self.assertEqual(task_in.multi_instance_type, event.mi_type)
+
+        self.assertEqual(task_in.multi_instance_count, event.mi_count)
+        self.assertEqual(task_in.multi_instance_index, event.mi_index)
+        self.assertEqual(task_in.process_name, event.process_name)
+        self.assertIsNotNone(event.date)
+
+
+        workflow = WorkflowApiSchema().load(json_data)
+        return workflow
@ -1,53 +1,39 @@
|
|||
import json
from tests.base_test import BaseTest
import random
import string

from flask import g

from tests.base_test import BaseTest
from crc import session, db
from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.approval import ApprovalModel, ApprovalStatus
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowModel


class TestApprovals(BaseTest):
    def setUp(self):
        """Initial setup shared by all TestApprovals tests"""
        self.load_example_data()
        self.study = self.create_study()
        self.workflow = self.create_workflow('random_fact')
        self.unrelated_study = StudyModel(title="second study",
                                          protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
                                          user_uid="dhf8r", primary_investigator_id="dhf8r")
        self.unrelated_workflow = self.create_workflow('random_fact', study=self.unrelated_study)

        # TODO: Move to base_test as a helper
        self.approval = ApprovalModel(
            study=self.study,
            workflow=self.workflow,
            approver_uid='lb3dp',
            status=ApprovalStatus.PENDING.value,
            version=1
        # Add a study with 2 approvers
        study_workflow_approvals_1 = self._create_study_workflow_approvals(
            user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp",
            approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value]
        )
        session.add(self.approval)
        self.study = study_workflow_approvals_1['study']
        self.workflow = study_workflow_approvals_1['workflow']
        self.approval = study_workflow_approvals_1['approvals'][0]
        self.approval_2 = study_workflow_approvals_1['approvals'][1]

        self.approval_2 = ApprovalModel(
            study=self.study,
            workflow=self.workflow,
            approver_uid='dhf8r',
            status=ApprovalStatus.PENDING.value,
            version=1
        # Add a study with 1 approver
        study_workflow_approvals_2 = self._create_study_workflow_approvals(
            user_uid="dhf8r", title="second study", primary_investigator_id="dhf8r",
            approver_uids=["lb3dp"], statuses=[ApprovalStatus.PENDING.value]
        )
        session.add(self.approval_2)

        # A third study, unrelated to the first.
        self.approval_3 = ApprovalModel(
            study=self.unrelated_study,
            workflow=self.unrelated_workflow,
            approver_uid='lb3dp',
            status=ApprovalStatus.PENDING.value,
            version=1
        )
        session.add(self.approval_3)

        session.commit()
        self.unrelated_study = study_workflow_approvals_2['study']
        self.unrelated_workflow = study_workflow_approvals_2['workflow']
        self.approval_3 = study_workflow_approvals_2['approvals'][0]

    def test_list_approvals_per_approver(self):
        """Only approvals associated with approver should be returned"""

@@ -85,7 +71,7 @@ class TestApprovals(BaseTest):
        response = json.loads(rv.get_data(as_text=True))
        response_count = len(response)
        self.assertEqual(1, response_count)
        self.assertEqual(1, len(response[0]['related_approvals']))  # this approval has a related approval.

    def test_update_approval_fails_if_not_the_approver(self):
        approval = session.query(ApprovalModel).filter_by(approver_uid='lb3dp').first()

@@ -145,9 +131,130 @@ class TestApprovals(BaseTest):
        self.assertEqual(approval.status, ApprovalStatus.DECLINED.value)

    def test_csv_export(self):
        approvals = db.session.query(ApprovalModel).all()
        for app in approvals:
            app.status = ApprovalStatus.APPROVED.value
        db.session.commit()
        self.load_test_spec('two_forms')
        self._add_lots_of_random_approvals(n=50, workflow_spec_name='two_forms')

        # Get all workflows
        workflows = db.session.query(WorkflowModel).filter_by(workflow_spec_id='two_forms').all()

        # For each workflow, complete all tasks
        for workflow in workflows:
            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
            self.assertEqual('two_forms', workflow_api.workflow_spec_id)

            # Log current user out.
            g.user = None
            self.assertIsNone(g.user)

            # Complete the form for Step one and post it.
            self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}, error_code=None, user_uid=workflow.study.user_uid)

            # Get the next Task
            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
            self.assertEqual("StepTwo", workflow_api.next_task.name)

            # Get all user Tasks and check that the data have been saved
            task = workflow_api.next_task
            self.assertIsNotNone(task.data)
            for val in task.data.values():
                self.assertIsNotNone(val)

        rv = self.app.get(f'/v1.0/approval/csv', headers=self.logged_in_headers())
        self.assert_success(rv)

    def test_all_approvals(self):
        self._add_lots_of_random_approvals()

        not_canceled = session.query(ApprovalModel).filter(ApprovalModel.status != 'CANCELED').all()
        not_canceled_study_ids = []
        for a in not_canceled:
            if a.study_id not in not_canceled_study_ids:
                not_canceled_study_ids.append(a.study_id)

        rv_all = self.app.get(f'/v1.0/all_approvals?status=false', headers=self.logged_in_headers())
        self.assert_success(rv_all)
        all_data = json.loads(rv_all.get_data(as_text=True))
        self.assertEqual(len(all_data), len(not_canceled_study_ids), 'Should return all non-canceled approvals, grouped by study')

        all_approvals = session.query(ApprovalModel).all()
        all_approvals_study_ids = []
        for a in all_approvals:
            if a.study_id not in all_approvals_study_ids:
                all_approvals_study_ids.append(a.study_id)

        rv_all = self.app.get(f'/v1.0/all_approvals?status=true', headers=self.logged_in_headers())
        self.assert_success(rv_all)
        all_data = json.loads(rv_all.get_data(as_text=True))
        self.assertEqual(len(all_data), len(all_approvals_study_ids), 'Should return all approvals, grouped by study')
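The two id-deduplication loops above just collect distinct study ids; an equivalent, more idiomatic sketch using set comprehensions:

    not_canceled_study_ids = {a.study_id for a in not_canceled}
    all_approvals_study_ids = {a.study_id for a in all_approvals}

len() of the resulting sets matches the list-based counts the assertions use.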

    def test_approvals_counts(self):
        statuses = [name for name, value in ApprovalStatus.__members__.items()]
        self._add_lots_of_random_approvals()

        # Get the counts
        rv_counts = self.app.get(f'/v1.0/approval-counts', headers=self.logged_in_headers())
        self.assert_success(rv_counts)
        counts = json.loads(rv_counts.get_data(as_text=True))

        # Get the actual approvals
        rv_approvals = self.app.get(f'/v1.0/approval', headers=self.logged_in_headers())
        self.assert_success(rv_approvals)
        approvals = json.loads(rv_approvals.get_data(as_text=True))

        # Tally up the number of approvals in each status category
        manual_counts = {}
        for status in statuses:
            manual_counts[status] = 0

        for approval in approvals:
            manual_counts[approval['status']] += 1

        # Numbers in each category should match
        for status in statuses:
            self.assertEqual(counts[status], manual_counts[status], 'Approval counts for status %s should match' % status)

        # Total number of approvals should match
        total_counts = sum(counts[status] for status in statuses)
        self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user')
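The manual tally above is equivalent to collections.Counter, which returns 0 for missing keys, so statuses with no approvals still compare cleanly; a sketch of the same check:

    from collections import Counter

    manual_counts = Counter(approval['status'] for approval in approvals)
    for status in statuses:
        self.assertEqual(counts[status], manual_counts[status])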

    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
                                         workflow_spec_name="random_fact"):
        study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
        workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
        approvals = []

        for i in range(len(approver_uids)):
            approvals.append(self.create_approval(
                study=study,
                workflow=workflow,
                approver_uid=approver_uids[i],
                status=statuses[i],
                version=1
            ))

        return {
            'study': study,
            'workflow': workflow,
            'approvals': approvals,
        }
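The create_approval call above is assumed to be a BaseTest helper (per the TODO in setUp about moving approval creation into base_test); a minimal sketch, using only the ApprovalModel fields that appear in this file:

    def create_approval(self, study=None, workflow=None, approver_uid=None,
                        status=None, version=None):
        approval = ApprovalModel(study=study, workflow=workflow,
                                 approver_uid=approver_uid, status=status,
                                 version=version)
        session.add(approval)
        session.commit()
        return approval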

    def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"):
        num_studies_before = db.session.query(StudyModel).count()
        statuses = [name for name, value in ApprovalStatus.__members__.items()]

        # Add a whole bunch of approvals with random statuses
        for i in range(n):
            approver_uids = random.choices(["lb3dp", "dhf8r"])
            self._create_study_workflow_approvals(
                user_uid=random.choice(["lb3dp", "dhf8r"]),
                title="".join(random.choices(string.ascii_lowercase, k=64)),
                primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
                approver_uids=approver_uids,
                statuses=random.choices(statuses, k=len(approver_uids)),
                workflow_spec_name=workflow_spec_name
            )

        session.flush()
        num_studies_after = db.session.query(StudyModel).count()
        self.assertEqual(num_studies_after, num_studies_before + n)
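Note that random.choices(["lb3dp", "dhf8r"]) defaults to k=1, so every random study gets exactly one approver. If the two-approver path should also be exercised, a sketch of one way to vary it:

    approver_uids = random.sample(["lb3dp", "dhf8r"], k=random.randint(1, 2))

Either way, each loop iteration creates exactly one study, which is what the num_studies_before + n assertion relies on.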

@@ -1,29 +1,73 @@
from tests.base_test import BaseTest
import json
from calendar import timegm
from datetime import timezone, datetime, timedelta

from crc import db
import jwt

from tests.base_test import BaseTest
from crc import db, app
from crc.api.common import ApiError
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudySchema, StudyModel
from crc.models.user import UserModel


class TestAuthentication(BaseTest):

    def test_auth_token(self):
        self.load_example_data()
        user = UserModel(uid="dhf8r")
        auth_token = user.encode_auth_token()
        self.assertTrue(isinstance(auth_token, bytes))
        self.assertEqual("dhf8r", user.decode_auth_token(auth_token).get("sub"))
    def tearDown(self):
        # Assure we set the production flag back to false.
        app.config['PRODUCTION'] = False
        super().tearDown()

    def test_backdoor_auth_creates_user(self):
        new_uid = 'lb3dp'  ## Assure this user id is in the fake responses from ldap.
    def test_auth_token(self):
        # Save the original timeout setting
        orig_ttl = float(app.config['TOKEN_AUTH_TTL_HOURS'])

        self.load_example_data()

        # Set the timeout to something else
        new_ttl = 4.0
        app.config['TOKEN_AUTH_TTL_HOURS'] = new_ttl
        user_1 = UserModel(uid="dhf8r")
        expected_exp_1 = timegm((datetime.utcnow() + timedelta(hours=new_ttl)).utctimetuple())
        auth_token_1 = user_1.encode_auth_token()
        self.assertTrue(isinstance(auth_token_1, bytes))
        self.assertEqual("dhf8r", user_1.decode_auth_token(auth_token_1).get("sub"))
        actual_exp_1 = user_1.decode_auth_token(auth_token_1).get("exp")
        self.assertTrue(expected_exp_1 - 1000 <= actual_exp_1 <= expected_exp_1 + 1000)

        # Set the timeout to a negative value so the token is already expired
        neg_ttl = -0.01
        app.config['TOKEN_AUTH_TTL_HOURS'] = neg_ttl
        user_2 = UserModel(uid="dhf8r")
        expected_exp_2 = timegm((datetime.utcnow() + timedelta(hours=neg_ttl)).utctimetuple())
        auth_token_2 = user_2.encode_auth_token()
        self.assertTrue(isinstance(auth_token_2, bytes))
        with self.assertRaises(ApiError) as api_error:
            with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
                user_2.decode_auth_token(auth_token_2)
        self.assertEqual(api_error.exception.status_code, 400, 'Should raise an API Error if token is expired')

        # Set the timeout back to where it was
        app.config['TOKEN_AUTH_TTL_HOURS'] = orig_ttl
        user_3 = UserModel(uid="dhf8r")
        expected_exp_3 = timegm((datetime.utcnow() + timedelta(hours=orig_ttl)).utctimetuple())
        auth_token_3 = user_3.encode_auth_token()
        self.assertTrue(isinstance(auth_token_3, bytes))
        actual_exp_3 = user_3.decode_auth_token(auth_token_3).get("exp")
        self.assertTrue(expected_exp_3 - 1000 <= actual_exp_3 <= expected_exp_3 + 1000)
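For context, a minimal sketch of an encode_auth_token that would satisfy the expiry assertions above, assuming PyJWT and a Flask SECRET_KEY (both are assumptions here; the project's real implementation may differ):

    import jwt
    from datetime import datetime, timedelta

    def encode_auth_token(self):
        hours = float(app.config['TOKEN_AUTH_TTL_HOURS'])
        payload = {
            'sub': self.uid,  # the tests read "sub" back out of the token
            'exp': datetime.utcnow() + timedelta(hours=hours),  # drives "exp"
        }
        # PyJWT 1.x returns bytes, matching the isinstance(..., bytes) asserts
        return jwt.encode(payload, app.config['SECRET_KEY'], algorithm='HS256')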

    def test_non_production_auth_creates_user(self):
        new_uid = 'lb3dp'  ## Assure this user id is in the fake responses from ldap.
        self.load_example_data()
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
        self.assertIsNone(user)

        user_info = {'uid': new_uid, 'first_name': 'Cordi', 'last_name': 'Nator',
                     'email_address': 'czn1z@virginia.edu'}
        redirect_url = 'http://worlds.best.website/admin'
        query_string = self.user_info_to_query_string(user_info, redirect_url)
        url = '/v1.0/sso_backdoor%s' % query_string
        url = '/v1.0/login%s' % query_string
        rv_1 = self.app.get(url, follow_redirects=False)
        self.assertTrue(rv_1.status_code == 302)
        self.assertTrue(str.startswith(rv_1.location, redirect_url))

@@ -38,22 +82,30 @@ class TestAuthentication(BaseTest):
        self.assertTrue(rv_2.status_code == 302)
        self.assertTrue(str.startswith(rv_2.location, redirect_url))

    def test_normal_auth_creates_user(self):
        new_uid = 'lb3dp'  # This user is in the test ldap system.
    def test_production_auth_creates_user(self):
        # Switch production mode on
        app.config['PRODUCTION'] = True

        self.load_example_data()
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()

        new_uid = 'lb3dp'  # This user is in the test ldap system.
        user = db.session.query(UserModel).filter_by(uid=new_uid).first()
        self.assertIsNone(user)
        redirect_url = 'http://worlds.best.website/admin'
        headers = dict(Uid=new_uid)
        db.session.flush()
        rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers)

        self.assert_success(rv)
        user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
        user = db.session.query(UserModel).filter_by(uid=new_uid).first()
        self.assertIsNotNone(user)
        self.assertEqual(new_uid, user.uid)
        self.assertEqual("Laura Barnes", user.display_name)
        self.assertEqual("lb3dp@virginia.edu", user.email_address)
        self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user.title)

        # Switch production mode back off
        app.config['PRODUCTION'] = False

    def test_current_user_status(self):
        self.load_example_data()

@@ -67,3 +119,108 @@ class TestAuthentication(BaseTest):
user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu')
|
||||
rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut'))
|
||||
self.assert_success(rv)
|
||||
|
||||
def test_admin_can_access_admin_only_endpoints(self):
|
||||
# Switch production mode on
|
||||
app.config['PRODUCTION'] = True
|
||||
|
||||
self.load_example_data()
|
||||
|
||||
admin_uids = app.config['ADMIN_UIDS']
|
||||
self.assertGreater(len(admin_uids), 0)
|
||||
admin_uid = admin_uids[0]
|
||||
self.assertEqual(admin_uid, 'dhf8r') # This user is in the test ldap system.
|
||||
admin_headers = dict(Uid=admin_uid)
|
||||
|
||||
rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers)
|
||||
self.assert_success(rv)
|
||||
|
||||
admin_user = db.session.query(UserModel).filter(UserModel.uid == admin_uid).first()
|
||||
self.assertIsNotNone(admin_user)
|
||||
self.assertEqual(admin_uid, admin_user.uid)
|
||||
|
||||
admin_study = self._make_fake_study(admin_uid)
|
||||
|
||||
admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode())
|
||||
|
||||
rv_add_study = self.app.post(
|
||||
'/v1.0/study',
|
||||
content_type="application/json",
|
||||
headers=admin_token_headers,
|
||||
data=json.dumps(StudySchema().dump(admin_study)),
|
||||
follow_redirects=False
|
||||
)
|
||||
self.assert_success(rv_add_study, 'Admin user should be able to add a study')
|
||||
|
||||
new_admin_study = json.loads(rv_add_study.get_data(as_text=True))
|
||||
db_admin_study = db.session.query(StudyModel).filter_by(id=new_admin_study['id']).first()
|
||||
self.assertIsNotNone(db_admin_study)
|
||||
|
||||
rv_del_study = self.app.delete(
|
||||
'/v1.0/study/%i' % db_admin_study.id,
|
||||
follow_redirects=False,
|
||||
headers=admin_token_headers
|
||||
)
|
||||
self.assert_success(rv_del_study, 'Admin user should be able to delete a study')
|
||||
|
||||
# Switch production mode back off
|
||||
app.config['PRODUCTION'] = False
|
||||
|
||||
def test_nonadmin_cannot_access_admin_only_endpoints(self):
|
||||
# Switch production mode on
|
||||
app.config['PRODUCTION'] = True
|
||||
|
||||
self.load_example_data()
|
||||
|
||||
# Non-admin user should not be able to delete a study
|
||||
non_admin_uid = 'lb3dp'
|
||||
admin_uids = app.config['ADMIN_UIDS']
|
||||
self.assertGreater(len(admin_uids), 0)
|
||||
self.assertNotIn(non_admin_uid, admin_uids)
|
||||
|
||||
non_admin_headers = dict(Uid=non_admin_uid)
|
||||
|
||||
rv = self.app.get(
|
||||
'v1.0/login',
|
||||
follow_redirects=False,
|
||||
headers=non_admin_headers
|
||||
)
|
||||
self.assert_success(rv)
|
||||
|
||||
non_admin_user = db.session.query(UserModel).filter_by(uid=non_admin_uid).first()
|
||||
self.assertIsNotNone(non_admin_user)
|
||||
|
||||
non_admin_token_headers = dict(Authorization='Bearer ' + non_admin_user.encode_auth_token().decode())
|
||||
|
||||
non_admin_study = self._make_fake_study(non_admin_uid)
|
||||
|
||||
rv_add_study = self.app.post(
|
||||
'/v1.0/study',
|
||||
content_type="application/json",
|
||||
headers=non_admin_token_headers,
|
||||
data=json.dumps(StudySchema().dump(non_admin_study))
|
||||
)
|
||||
self.assert_success(rv_add_study, 'Non-admin user should be able to add a study')
|
||||
|
||||
new_non_admin_study = json.loads(rv_add_study.get_data(as_text=True))
|
||||
db_non_admin_study = db.session.query(StudyModel).filter_by(id=new_non_admin_study['id']).first()
|
||||
self.assertIsNotNone(db_non_admin_study)
|
||||
|
||||
rv_non_admin_del_study = self.app.delete(
|
||||
'/v1.0/study/%i' % db_non_admin_study.id,
|
||||
follow_redirects=False,
|
||||
headers=non_admin_token_headers
|
||||
)
|
||||
self.assert_failure(rv_non_admin_del_study, 401)
|
||||
|
||||
# Switch production mode back off
|
||||
app.config['PRODUCTION'] = False
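Both tests above assume study deletion is gated on membership in the ADMIN_UIDS config list while study creation is open to any authenticated user. A hedged sketch of such a gate (names are illustrative, not the project's actual helper):

    def assert_admin(user):
        # hypothetical guard for admin-only endpoints
        if user.uid not in app.config['ADMIN_UIDS']:
            raise ApiError('unauthorized', 'Admin privileges are required.',
                           status_code=401)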

    def _make_fake_study(self, uid):
        return {
            "title": "blah",
            "last_updated": datetime.now(tz=timezone.utc),
            "protocol_builder_status": ProtocolBuilderStatus.ACTIVE,
            "primary_investigator_id": uid,
            "user_uid": uid,
        }

@@ -157,10 +157,12 @@ class TestStudyService(BaseTest):

    def test_get_all_studies(self):
        user = self.create_user_with_study_and_workflow()
        study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first()
        self.assertIsNotNone(study)

        # Add a document to the study with the correct code.
        workflow1 = self.create_workflow('docx')
        workflow2 = self.create_workflow('empty_workflow')
        workflow1 = self.create_workflow('docx', study=study)
        workflow2 = self.create_workflow('empty_workflow', study=study)

        # Add files to both workflows.
        FileService.add_workflow_file(workflow_id=workflow1.id,

@@ -4,85 +4,14 @@ import random
from unittest.mock import patch

from tests.base_test import BaseTest

from crc import session, app
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
from crc.models.file import FileModelSchema
from crc.models.stats import TaskEventModel
from crc.models.workflow import WorkflowStatus
from crc.services.workflow_service import WorkflowService


class TestTasksApi(BaseTest):

    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
                          (workflow.id, str(soft_reset), str(hard_reset)),
                          headers=self.logged_in_headers(),
                          content_type="application/json")
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        workflow_api = WorkflowApiSchema().load(json_data)
        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
        return workflow_api

    def complete_form(self, workflow_in, task_in, dict_data, error_code=None):
        prev_completed_task_count = workflow_in.completed_tasks
        if isinstance(task_in, dict):
            task_id = task_in["id"]
        else:
            task_id = task_in.id
        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
                          headers=self.logged_in_headers(),
                          content_type="application/json",
                          data=json.dumps(dict_data))
        if error_code:
            self.assert_failure(rv, error_code=error_code)
            return

        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))

        # Assure stats are updated on the model
        workflow = WorkflowApiSchema().load(json_data)
        # The total number of tasks may change over time, as users move through gateways
        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
        self.assertIsNotNone(workflow.total_tasks)
        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
        # Assure a record exists in the Task Events
        task_events = session.query(TaskEventModel) \
            .filter_by(workflow_id=workflow.id) \
            .filter_by(task_id=task_id) \
            .order_by(TaskEventModel.date.desc()).all()
        self.assertGreater(len(task_events), 0)
        event = task_events[0]
        self.assertIsNotNone(event.study_id)
        self.assertEqual("dhf8r", event.user_uid)
        self.assertEqual(workflow.id, event.workflow_id)
        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
        self.assertEqual(workflow.spec_version, event.spec_version)
        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
        self.assertEqual(task_in.id, task_id)
        self.assertEqual(task_in.name, event.task_name)
        self.assertEqual(task_in.title, event.task_title)
        self.assertEqual(task_in.type, event.task_type)
        self.assertEqual("COMPLETED", event.task_state)
        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
        if isinstance(task_in.multi_instance_type, MultiInstanceType):
            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
        else:
            self.assertEqual(task_in.multi_instance_type, event.mi_type)

        self.assertEqual(task_in.multi_instance_count, event.mi_count)
        self.assertEqual(task_in.multi_instance_index, event.mi_index)
        self.assertEqual(task_in.process_name, event.process_name)
        self.assertIsNotNone(event.date)

        workflow = WorkflowApiSchema().load(json_data)
        return workflow

    def test_get_current_user_tasks(self):
        self.load_example_data()
        workflow = self.create_workflow('random_fact')

@@ -185,6 +114,7 @@ class TestTasksApi(BaseTest):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('docx')

        # get the first form in the two form workflow.
        task = self.get_workflow_api(workflow).next_task
        data = {

@@ -203,6 +133,7 @@ class TestTasksApi(BaseTest):
        json_data = json.loads(rv.get_data(as_text=True))
        files = FileModelSchema(many=True).load(json_data, session=session)
        self.assertTrue(len(files) == 1)

        # Assure we can still delete the study even when there is a file attached to a workflow.
        rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
        self.assert_success(rv)