diff --git a/.travis.yml b/.travis.yml
index fba238f6..9ca51691 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
language: python
python:
- - "3.7"
+ - "3.6.9"
services:
- postgresql
diff --git a/Dockerfile b/Dockerfile
index 8ff7af23..fae1657f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,27 +1,24 @@
-FROM python:3.7
+FROM python:3.6.9-slim
-ENV PATH=/root/.local/bin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
+WORKDIR /app
-# install node and yarn
-RUN apt-get update
-RUN apt-get -y install postgresql-client
+COPY Pipfile Pipfile.lock /app/
-# config project dir
-RUN mkdir /crc-workflow
-WORKDIR /crc-workflow
+RUN pip install pipenv && \
+ apt-get update && \
+ apt-get install -y --no-install-recommends \
+ gcc python3-dev libssl-dev \
+ curl postgresql-client git-core && \
+ pipenv install --dev && \
+ apt-get remove -y gcc python3-dev libssl-dev && \
+ apt-get purge -y --auto-remove && \
+    rm -rf /var/lib/apt/lists/*
-# install python requirements
-RUN pip install pipenv
-ADD Pipfile /crc-workflow/
-ADD Pipfile.lock /crc-workflow/
-RUN pipenv install --dev
+COPY . /app/
-# include rejoiner code (gets overriden by local changes)
-COPY . /crc-workflow/
-
-# run webserver by default
-ENV FLASK_APP=./crc/__init__.py
-CMD ["pipenv", "run", "python", "./run.py"]
+ENV FLASK_APP=/app/crc/__init__.py
+# Only the last CMD takes effect, so chain migrations and the server in one command.
+CMD ["sh", "-c", "pipenv run flask db upgrade && exec pipenv run python /app/run.py"]
# expose ports
EXPOSE 5000
diff --git a/Pipfile b/Pipfile
index 6f374722..77c70afc 100644
--- a/Pipfile
+++ b/Pipfile
@@ -24,18 +24,17 @@ pyjwt = "*"
requests = "*"
xlsxwriter = "*"
webtest = "*"
-spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "bug/the_horror"}
+spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"}
alembic = "*"
coverage = "*"
sphinx = "*"
recommonmark = "*"
psycopg2-binary = "*"
docxtpl = "*"
-flask-sso = "*"
python-dateutil = "*"
pandas = "*"
xlrd = "*"
ldap3 = "*"
[requires]
-python_version = "3.7"
+python_version = "3.6.9"
diff --git a/Pipfile.lock b/Pipfile.lock
index 4f8b70c9..036d4bf9 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,11 +1,11 @@
{
"_meta": {
"hash": {
- "sha256": "bd289126c41b0f5f2761f0415d85e1110a584256460374a9ce4cda07c0033ddd"
+ "sha256": "1ca737db75750ea4351c15b4b0b26155d90bc5522705ed293a0c2773600b6a0a"
},
"pipfile-spec": 6,
"requires": {
- "python_version": "3.7"
+ "python_version": "3.6.9"
},
"sources": [
{
@@ -96,12 +96,6 @@
],
"version": "==3.6.3.0"
},
- "blinker": {
- "hashes": [
- "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
- ],
- "version": "==1.4"
- },
"celery": {
"hashes": [
"sha256:108a0bf9018a871620936c33a3ee9f6336a89f8ef0a0f567a9001f4aa361415f",
@@ -307,13 +301,6 @@
],
"version": "==2.4.1"
},
- "flask-sso": {
- "hashes": [
- "sha256:541a8a2387c6eac4325c53f8f7f863a03173b37aa558a37a430010d7fc1a3633"
- ],
- "index": "pypi",
- "version": "==0.4.0"
- },
"future": {
"hashes": [
"sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"
@@ -401,35 +388,35 @@
},
"lxml": {
"hashes": [
- "sha256:06d4e0bbb1d62e38ae6118406d7cdb4693a3fa34ee3762238bcb96c9e36a93cd",
- "sha256:0701f7965903a1c3f6f09328c1278ac0eee8f56f244e66af79cb224b7ef3801c",
- "sha256:1f2c4ec372bf1c4a2c7e4bb20845e8bcf8050365189d86806bad1e3ae473d081",
- "sha256:4235bc124fdcf611d02047d7034164897ade13046bda967768836629bc62784f",
- "sha256:5828c7f3e615f3975d48f40d4fe66e8a7b25f16b5e5705ffe1d22e43fb1f6261",
- "sha256:585c0869f75577ac7a8ff38d08f7aac9033da2c41c11352ebf86a04652758b7a",
- "sha256:5d467ce9c5d35b3bcc7172c06320dddb275fea6ac2037f72f0a4d7472035cea9",
- "sha256:63dbc21efd7e822c11d5ddbedbbb08cd11a41e0032e382a0fd59b0b08e405a3a",
- "sha256:7bc1b221e7867f2e7ff1933165c0cec7153dce93d0cdba6554b42a8beb687bdb",
- "sha256:8620ce80f50d023d414183bf90cc2576c2837b88e00bea3f33ad2630133bbb60",
- "sha256:8a0ebda56ebca1a83eb2d1ac266649b80af8dd4b4a3502b2c1e09ac2f88fe128",
- "sha256:90ed0e36455a81b25b7034038e40880189169c308a3df360861ad74da7b68c1a",
- "sha256:95e67224815ef86924fbc2b71a9dbd1f7262384bca4bc4793645794ac4200717",
- "sha256:afdb34b715daf814d1abea0317b6d672476b498472f1e5aacbadc34ebbc26e89",
- "sha256:b4b2c63cc7963aedd08a5f5a454c9f67251b1ac9e22fd9d72836206c42dc2a72",
- "sha256:d068f55bda3c2c3fcaec24bd083d9e2eede32c583faf084d6e4b9daaea77dde8",
- "sha256:d5b3c4b7edd2e770375a01139be11307f04341ec709cf724e0f26ebb1eef12c3",
- "sha256:deadf4df349d1dcd7b2853a2c8796593cc346600726eff680ed8ed11812382a7",
- "sha256:df533af6f88080419c5a604d0d63b2c33b1c0c4409aba7d0cb6de305147ea8c8",
- "sha256:e4aa948eb15018a657702fee0b9db47e908491c64d36b4a90f59a64741516e77",
- "sha256:e5d842c73e4ef6ed8c1bd77806bf84a7cb535f9c0cf9b2c74d02ebda310070e1",
- "sha256:ebec08091a22c2be870890913bdadd86fcd8e9f0f22bcb398abd3af914690c15",
- "sha256:edc15fcfd77395e24543be48871c251f38132bb834d9fdfdad756adb6ea37679",
- "sha256:f2b74784ed7e0bc2d02bd53e48ad6ba523c9b36c194260b7a5045071abbb1012",
- "sha256:fa071559f14bd1e92077b1b5f6c22cf09756c6de7139370249eb372854ce51e6",
- "sha256:fd52e796fee7171c4361d441796b64df1acfceb51f29e545e812f16d023c4bbc",
- "sha256:fe976a0f1ef09b3638778024ab9fb8cde3118f203364212c198f71341c0715ca"
+ "sha256:06748c7192eab0f48e3d35a7adae609a329c6257495d5e53878003660dc0fec6",
+ "sha256:0790ddca3f825dd914978c94c2545dbea5f56f008b050e835403714babe62a5f",
+ "sha256:1aa7a6197c1cdd65d974f3e4953764eee3d9c7b67e3966616b41fab7f8f516b7",
+ "sha256:22c6d34fdb0e65d5f782a4d1a1edb52e0a8365858dafb1c08cb1d16546cf0786",
+ "sha256:2754d4406438c83144f9ffd3628bbe2dcc6d62b20dbc5c1ec4bc4385e5d44b42",
+ "sha256:27ee0faf8077c7c1a589573b1450743011117f1aa1a91d5ae776bbc5ca6070f2",
+ "sha256:2b02c106709466a93ed424454ce4c970791c486d5fcdf52b0d822a7e29789626",
+ "sha256:2d1ddce96cf15f1254a68dba6935e6e0f1fe39247de631c115e84dd404a6f031",
+ "sha256:4f282737d187ae723b2633856085c31ae5d4d432968b7f3f478a48a54835f5c4",
+ "sha256:51bb4edeb36d24ec97eb3e6a6007be128b720114f9a875d6b370317d62ac80b9",
+ "sha256:7eee37c1b9815e6505847aa5e68f192e8a1b730c5c7ead39ff317fde9ce29448",
+ "sha256:7fd88cb91a470b383aafad554c3fe1ccf6dfb2456ff0e84b95335d582a799804",
+ "sha256:9144ce36ca0824b29ebc2e02ca186e54040ebb224292072250467190fb613b96",
+ "sha256:925baf6ff1ef2c45169f548cc85204433e061360bfa7d01e1be7ae38bef73194",
+ "sha256:a636346c6c0e1092ffc202d97ec1843a75937d8c98aaf6771348ad6422e44bb0",
+ "sha256:a87dbee7ad9dce3aaefada2081843caf08a44a8f52e03e0a4cc5819f8398f2f4",
+ "sha256:a9e3b8011388e7e373565daa5e92f6c9cb844790dc18e43073212bb3e76f7007",
+ "sha256:afb53edf1046599991fb4a7d03e601ab5f5422a5435c47ee6ba91ec3b61416a6",
+ "sha256:b26719890c79a1dae7d53acac5f089d66fd8cc68a81f4e4bd355e45470dc25e1",
+ "sha256:b7462cdab6fffcda853338e1741ce99706cdf880d921b5a769202ea7b94e8528",
+ "sha256:b77975465234ff49fdad871c08aa747aae06f5e5be62866595057c43f8d2f62c",
+ "sha256:c47a8a5d00060122ca5908909478abce7bbf62d812e3fc35c6c802df8fb01fe7",
+ "sha256:c79e5debbe092e3c93ca4aee44c9a7631bdd407b2871cb541b979fd350bbbc29",
+ "sha256:d8d40e0121ca1606aa9e78c28a3a7d88a05c06b3ca61630242cded87d8ce55fa",
+ "sha256:ee2be8b8f72a2772e72ab926a3bccebf47bb727bda41ae070dc91d1fb759b726",
+ "sha256:f95d28193c3863132b1f55c1056036bf580b5a488d908f7d22a04ace8935a3a9",
+ "sha256:fadd2a63a2bfd7fb604508e553d1cf68eca250b2fbdbd81213b5f6f2fbf23529"
],
- "version": "==4.5.0"
+ "version": "==4.5.1"
},
"mako": {
"hashes": [
@@ -543,10 +530,10 @@
},
"packaging": {
"hashes": [
- "sha256:3c292b474fda1671ec57d46d739d072bfd495a4f51ad01a055121d81e952b7a3",
- "sha256:82f77b9bee21c1bafbf35a84905d604d5d1223801d639cf3ed140bd651c08752"
+ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8",
+ "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"
],
- "version": "==20.3"
+ "version": "==20.4"
},
"pandas": {
"hashes": [
@@ -711,10 +698,10 @@
},
"six": {
"hashes": [
- "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a",
- "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"
+ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
+ "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
- "version": "==1.14.0"
+ "version": "==1.15.0"
},
"snowballstemmer": {
"hashes": [
@@ -783,7 +770,7 @@
"spiffworkflow": {
"editable": true,
"git": "https://github.com/sartography/SpiffWorkflow.git",
- "ref": "070d80fd670e129aae7ee949b3e66cc744520e49"
+ "ref": "cb098ee6d55b85bf7795997f4ad5f78c27d15381"
},
"sqlalchemy": {
"hashes": [
@@ -919,10 +906,10 @@
},
"packaging": {
"hashes": [
- "sha256:3c292b474fda1671ec57d46d739d072bfd495a4f51ad01a055121d81e952b7a3",
- "sha256:82f77b9bee21c1bafbf35a84905d604d5d1223801d639cf3ed140bd651c08752"
+ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8",
+ "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"
],
- "version": "==20.3"
+ "version": "==20.4"
},
"pluggy": {
"hashes": [
@@ -955,10 +942,10 @@
},
"six": {
"hashes": [
- "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a",
- "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"
+ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
+ "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
- "version": "==1.14.0"
+ "version": "==1.15.0"
},
"wcwidth": {
"hashes": [
diff --git a/config/default.py b/config/default.py
index dd19f2ab..d2486f86 100644
--- a/config/default.py
+++ b/config/default.py
@@ -27,19 +27,6 @@ TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! T
FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
-#: Default attribute map for single signon.
-SSO_LOGIN_URL = '/login'
-SSO_ATTRIBUTE_MAP = {
- 'eppn': (False, 'eppn'), # dhf8r@virginia.edu
- 'uid': (True, 'uid'), # dhf8r
- 'givenName': (False, 'first_name'), # Daniel
- 'mail': (False, 'email_address'), # dhf8r@Virginia.EDU
- 'sn': (False, 'last_name'), # Funk
- 'affiliation': (False, 'affiliation'), # 'staff@virginia.edu;member@virginia.edu'
- 'displayName': (False, 'display_name'), # Daniel Harold Funk
- 'title': (False, 'title') # SOFTWARE ENGINEER V
-}
-
# %s/%i placeholders expected for uva_id and study_id in various calls.
PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/pb/")
PB_USER_STUDIES_URL = environ.get('PB_USER_STUDIES_URL', default=PB_BASE_URL + "user_studies?uva_id=%s")
diff --git a/crc/__init__.py b/crc/__init__.py
index aa301108..9f1c4ee3 100644
--- a/crc/__init__.py
+++ b/crc/__init__.py
@@ -6,7 +6,6 @@ from flask_cors import CORS
from flask_marshmallow import Marshmallow
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
-from flask_sso import SSO
logging.basicConfig(level=logging.INFO)
@@ -31,7 +30,6 @@ session = db.session
migrate = Migrate(app, db)
ma = Marshmallow(app)
-sso = SSO(app=app)
from crc import models
from crc import api
diff --git a/crc/api/user.py b/crc/api/user.py
index 83245d19..6924eb27 100644
--- a/crc/api/user.py
+++ b/crc/api/user.py
@@ -1,12 +1,12 @@
import json
import connexion
-from flask import redirect, g
+from flask import redirect, g, request
-from crc import sso, app, db
+from crc import app, db
from crc.api.common import ApiError
from crc.models.user import UserModel, UserModelSchema
-
+from crc.services.ldap_service import LdapService, LdapUserInfo
"""
.. module:: crc.api.user
@@ -32,53 +32,76 @@ def verify_token(token):
def get_current_user():
return UserModelSchema().dump(g.user)
+@app.route('/login')
+def sso_login():
+ # This what I see coming back:
+ # X-Remote-Cn: Daniel Harold Funk (dhf8r)
+ # X-Remote-Sn: Funk
+ # X-Remote-Givenname: Daniel
+ # X-Remote-Uid: dhf8r
+ # Eppn: dhf8r@virginia.edu
+ # Cn: Daniel Harold Funk (dhf8r)
+ # Sn: Funk
+ # Givenname: Daniel
+ # Uid: dhf8r
+ # X-Remote-User: dhf8r@virginia.edu
+ # X-Forwarded-For: 128.143.0.10
+ # X-Forwarded-Host: dev.crconnect.uvadcos.io
+ # X-Forwarded-Server: dev.crconnect.uvadcos.io
+ # Connection: Keep-Alive
+ uid = request.headers.get("Uid")
+ if not uid:
+ uid = request.headers.get("X-Remote-Uid")
-@sso.login_handler
-def sso_login(user_info):
- app.logger.info("Login from Shibboleth happening. " + json.dump(user_info))
- # TODO: Get redirect URL from Shibboleth request header
- _handle_login(user_info)
+ if not uid:
+        raise ApiError("invalid_sso_credentials", "Neither 'Uid' nor 'X-Remote-Uid' was present in the headers: %s"
+ % str(request.headers))
+
+ redirect = request.args.get('redirect')
+ app.logger.info("SSO_LOGIN: Full URL: " + request.url)
+ app.logger.info("SSO_LOGIN: User Id: " + uid)
+ app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect))
+
+ ldap_service = LdapService()
+ info = ldap_service.user_info(uid)
+
+ return _handle_login(info, redirect)
+
+@app.route('/sso')
+def sso():
+    response = ""
+    response += "<h1>Headers</h1>"
+    response += "<ul>"
+    for k, v in request.headers:
+        response += "<li>%s %s</li>\n" % (k, v)
+    response += "</ul>"
+    response += "<h1>Environment</h1>"
+    response += "<ul>"
+    for k, v in request.environ.items():
+        response += "<li>%s %s</li>\n" % (k, v)
+    response += "</ul>"
+    return response
-def _handle_login(user_info, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
+def _handle_login(user_info: LdapUserInfo, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
"""On successful login, adds user to database if the user is not already in the system,
then returns the frontend auth callback URL, with auth token appended.
Args:
- user_info (dict of {
- uid: str,
- affiliation: Optional[str],
- display_name: Optional[str],
- email_address: Optional[str],
- eppn: Optional[str],
- first_name: Optional[str],
- last_name: Optional[str],
- title: Optional[str],
- }): Dictionary of user attributes
- redirect_url: Optional[str]
+ user_info - an ldap user_info object.
+ redirect_url: Optional[str]
Returns:
Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
"""
- uid = user_info['uid']
- user = db.session.query(UserModel).filter(UserModel.uid == uid).first()
+ user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
if user is None:
# Add new user
- user = UserModelSchema().load(user_info, session=db.session)
- else:
- # Update existing user data
- user = UserModelSchema().load(user_info, session=db.session, instance=user, partial=True)
+ user = UserModel()
- # Build display_name if not set
- if 'display_name' not in user_info or len(user_info['display_name']) == 0:
- display_name_list = []
-
- for prop in ['first_name', 'last_name']:
- if prop in user_info and len(user_info[prop]) > 0:
- display_name_list.append(user_info[prop])
-
- user.display_name = ' '.join(display_name_list)
+ user.uid = user_info.uid
+ user.display_name = user_info.display_name
+ user.email_address = user_info.email_address
+ user.affiliation = user_info.affiliation
+ user.title = user_info.title
db.session.add(user)
db.session.commit()
@@ -86,10 +109,14 @@ def _handle_login(user_info, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
# Return the frontend auth callback URL, with auth token appended.
auth_token = user.encode_auth_token().decode()
if redirect_url is not None:
+ app.logger.info("SSO_LOGIN: REDIRECTING TO: " + redirect_url)
return redirect('%s/%s' % (redirect_url, auth_token))
else:
+ app.logger.info("SSO_LOGIN: NO REDIRECT, JUST RETURNING AUTH TOKEN.")
return auth_token
+
+
def backdoor(
uid=None,
affiliation=None,
@@ -122,11 +149,9 @@ def backdoor(
ApiError. If on production, returns a 404 error.
"""
if not 'PRODUCTION' in app.config or not app.config['PRODUCTION']:
- user_info = {}
- for key in UserModel.__dict__.keys():
- if key in connexion.request.args:
- user_info[key] = connexion.request.args[key]
-
- return _handle_login(user_info, redirect_url)
+ ldap_info = LdapUserInfo()
+ ldap_info.uid = connexion.request.args["uid"]
+ ldap_info.email_address = connexion.request.args["email_address"]
+ return _handle_login(ldap_info, redirect_url)
else:
raise ApiError('404', 'unknown')
diff --git a/crc/api/workflow.py b/crc/api/workflow.py
index 5d5b2af5..4ad02f0e 100644
--- a/crc/api/workflow.py
+++ b/crc/api/workflow.py
@@ -7,6 +7,7 @@ from crc.models.file import FileModel, LookupDataSchema
from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \
WorkflowSpecCategoryModelSchema
from crc.services.file_service import FileService
+from crc.services.lookup_service import LookupService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
@@ -217,9 +218,9 @@ def delete_workflow_spec_category(cat_id):
def lookup(workflow_id, task_id, field_id, query, limit):
"""
- given a field in a task, attempts to find the lookup table associated with that field
- and runs a full-text query against it to locate the values and labels that would be
- returned to a type-ahead box.
+ given a field in a task, attempts to find the lookup table or function associated
+ with that field and runs a full-text query against it to locate the values and
+ labels that would be returned to a type-ahead box.
"""
workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
if not workflow_model:
@@ -236,6 +237,5 @@ def lookup(workflow_id, task_id, field_id, query, limit):
if not field:
raise ApiError("unknown_field", "No field named %s in task %s" % (task_id, spiff_task.task_spec.name))
- lookup_table = WorkflowService.get_lookup_table(spiff_task, field)
- lookup_data = WorkflowService.run_lookup_query(lookup_table, query, limit)
+ lookup_data = LookupService.lookup(spiff_task, field, query, limit)
return LookupDataSchema(many=True).dump(lookup_data)
\ No newline at end of file
diff --git a/crc/models/api_models.py b/crc/models/api_models.py
index 4e7d4304..4b279965 100644
--- a/crc/models/api_models.py
+++ b/crc/models/api_models.py
@@ -31,10 +31,12 @@ class NavigationItem(object):
class Task(object):
- ENUM_OPTIONS_FILE_PROP = "enum.options.file"
- EMUM_OPTIONS_VALUE_COL_PROP = "enum.options.value.column"
- EMUM_OPTIONS_LABEL_COL_PROP = "enum.options.label.column"
- EMUM_OPTIONS_AS_LOOKUP = "enum.options.lookup"
+ PROP_OPTIONS_FILE = "spreadsheet.name"
+ PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column"
+ PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column"
+ PROP_LDAP_LOOKUP = "ldap.lookup"
+ FIELD_TYPE_AUTO_COMPLETE = "autocomplete"
+
def __init__(self, id, name, title, type, state, form, documentation, data,
multi_instance_type, multi_instance_count, multi_instance_index, process_name, properties):
diff --git a/crc/scripts/complete_template.py b/crc/scripts/complete_template.py
index 4fc7eb16..64ab9531 100644
--- a/crc/scripts/complete_template.py
+++ b/crc/scripts/complete_template.py
@@ -1,12 +1,14 @@
import copy
+import re
from io import BytesIO
import jinja2
-from docxtpl import DocxTemplate, Listing
+from docx.shared import Inches
+from docxtpl import DocxTemplate, Listing, InlineImage
from crc import session
from crc.api.common import ApiError
-from crc.models.file import CONTENT_TYPES
+from crc.models.file import CONTENT_TYPES, FileModel, FileDataModel
from crc.models.workflow import WorkflowModel
from crc.scripts.script import Script
from crc.services.file_service import FileService
@@ -27,12 +29,12 @@ Takes two arguments:
def do_task_validate_only(self, task, study_id, *args, **kwargs):
"""For validation only, process the template, but do not store it in the database."""
- self.process_template(task, study_id, *args, **kwargs)
+ self.process_template(task, study_id, None, *args, **kwargs)
def do_task(self, task, study_id, *args, **kwargs):
workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
- final_document_stream = self.process_template(task, study_id, *args, **kwargs)
workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
+ final_document_stream = self.process_template(task, study_id, workflow, *args, **kwargs)
file_name = args[0]
irb_doc_code = args[1]
FileService.add_task_file(study_id=study_id,
@@ -44,9 +46,9 @@ Takes two arguments:
binary_data=final_document_stream.read(),
irb_doc_code=irb_doc_code)
- def process_template(self, task, study_id, *args, **kwargs):
+ def process_template(self, task, study_id, workflow=None, *args, **kwargs):
"""Entry point, mostly worried about wiring it all up."""
- if len(args) != 2:
+ if len(args) < 2 or len(args) > 3:
raise ApiError(code="missing_argument",
message="The CompleteTemplate script requires 2 arguments. The first argument is "
"the name of the docx template to use. The second "
@@ -59,21 +61,85 @@ Takes two arguments:
raise ApiError(code="invalid_argument",
message="The given task does not match the given study.")
- file_data_model = FileService.get_workflow_file_data(task.workflow, file_name)
- return self.make_template(BytesIO(file_data_model.data), task.data)
+ file_data_model = None
+ if workflow is not None:
+ # Get the workflow's latest files
+ joined_file_data_models = WorkflowProcessor\
+ .get_file_models_for_version(workflow.workflow_spec_id, workflow.spec_version)
+ for joined_file_data in joined_file_data_models:
+ if joined_file_data.file_model.name == file_name:
+ file_data_model = session.query(FileDataModel).filter_by(id=joined_file_data.id).first()
- def make_template(self, binary_stream, context):
+ if workflow is None or file_data_model is None:
+ file_data_model = FileService.get_workflow_file_data(task.workflow, file_name)
+
+ # Get images from file/files fields
+ if len(args) == 3:
+ image_file_data = self.get_image_file_data(args[2], task)
+ else:
+ image_file_data = None
+
+ return self.make_template(BytesIO(file_data_model.data), task.data, image_file_data)
+
+ def get_image_file_data(self, fields_str, task):
+ image_file_data = []
+ images_field_str = re.sub(r'[\[\]]', '', fields_str)
+ images_field_keys = [v.strip() for v in images_field_str.strip().split(',')]
+ for field_key in images_field_keys:
+ if field_key in task.data:
+ v = task.data[field_key]
+ file_ids = v if isinstance(v, list) else [v]
+
+ for file_id in file_ids:
+ if isinstance(file_id, str) and file_id.isnumeric():
+ file_id = int(file_id)
+
+ if file_id is not None and isinstance(file_id, int):
+ if not task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
+ # Get the actual image data
+ image_file_model = session.query(FileModel).filter_by(id=file_id).first()
+ image_file_data_model = FileService.get_file_data(file_id, image_file_model)
+ if image_file_data_model is not None:
+ image_file_data.append(image_file_data_model)
+
+ else:
+ raise ApiError(
+ code="not_a_file_id",
+ message="The CompleteTemplate script requires 2-3 arguments. The third argument should "
+ "be a comma-delimited list of File IDs")
+
+ return image_file_data
+
+ def make_template(self, binary_stream, context, image_file_data=None):
doc = DocxTemplate(binary_stream)
doc_context = copy.deepcopy(context)
doc_context = self.rich_text_update(doc_context)
+ doc_context = self.append_images(doc, doc_context, image_file_data)
jinja_env = jinja2.Environment(autoescape=True)
doc.render(doc_context, jinja_env)
target_stream = BytesIO()
doc.save(target_stream)
- target_stream.seek(0) # move to the beginning of the stream.
+ target_stream.seek(0) # move to the beginning of the stream.
return target_stream
+ def append_images(self, template, context, image_file_data):
+ context['images'] = {}
+ if image_file_data is not None:
+ for file_data_model in image_file_data:
+ fm = file_data_model.file_model
+ if fm is not None:
+ context['images'][fm.id] = {
+ 'name': fm.name,
+ 'url': '/v1.0/file/%s/data' % fm.id,
+ 'image': self.make_image(file_data_model, template)
+ }
+
+ return context
+
+ def make_image(self, file_data_model, template):
+ return InlineImage(template, BytesIO(file_data_model.data), width=Inches(6.5))
+
def rich_text_update(self, context):
"""This is a bit of a hack. If we find that /n characters exist in the data, we want
these to come out in the final document without requiring someone to predict it in the
diff --git a/crc/services/file_service.py b/crc/services/file_service.py
index b19b7d6e..2ac92bdb 100644
--- a/crc/services/file_service.py
+++ b/crc/services/file_service.py
@@ -1,3 +1,4 @@
+import hashlib
import json
import os
from datetime import datetime
@@ -11,7 +12,6 @@ from crc.api.common import ApiError
from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowSpecModel
from crc.services.workflow_processor import WorkflowProcessor
-import hashlib
class FileService(object):
@@ -110,13 +110,14 @@ class FileService(object):
@staticmethod
def update_file(file_model, binary_data, content_type):
+ session.flush() # Assure the database is up-to-date before running this.
file_data_model = session.query(FileDataModel). \
filter_by(file_model_id=file_model.id,
version=file_model.latest_version
).with_for_update().first()
md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
- if (file_data_model is not None and md5_checksum == file_data_model.md5_hash):
+ if (file_data_model is not None) and (md5_checksum == file_data_model.md5_hash):
# This file does not need to be updated, it's the same file.
return file_model
@@ -141,12 +142,15 @@ class FileService(object):
file_model.primary_process_id = WorkflowProcessor.get_process_id(bpmn)
file_model.latest_version = version
- file_data_model = FileDataModel(data=binary_data, file_model=file_model, version=version,
- md5_hash=md5_checksum, last_updated=datetime.now())
+ new_file_data_model = FileDataModel(
+ data=binary_data, file_model_id=file_model.id, file_model=file_model,
+ version=version, md5_hash=md5_checksum, last_updated=datetime.now()
+ )
- session.add_all([file_model, file_data_model])
+ session.add_all([file_model, new_file_data_model])
session.commit()
session.flush() # Assure the id is set on the model before returning it.
+
return file_model
@staticmethod
@@ -156,18 +160,26 @@ class FileService(object):
query = session.query(FileModel).filter_by(is_reference=is_reference)
if workflow_spec_id:
query = query.filter_by(workflow_spec_id=workflow_spec_id)
- if study_id:
- query = query.filter_by(study_id=study_id)
- if workflow_id:
- query = query.filter_by(workflow_id=workflow_id)
- if task_id:
- query = query.filter_by(task_id=str(task_id))
- if form_field_key:
- query = query.filter_by(form_field_key=form_field_key)
- if name:
- query = query.filter_by(name=name)
- if irb_doc_code:
- query = query.filter_by(irb_doc_code=irb_doc_code)
+ if all(v is None for v in [study_id, workflow_id, task_id, form_field_key]):
+ query = query.filter_by(
+ study_id=None,
+ workflow_id=None,
+ task_id=None,
+ form_field_key=None,
+ )
+ else:
+ if study_id:
+ query = query.filter_by(study_id=study_id)
+ if workflow_id:
+ query = query.filter_by(workflow_id=workflow_id)
+ if task_id:
+ query = query.filter_by(task_id=str(task_id))
+ if form_field_key:
+ query = query.filter_by(form_field_key=form_field_key)
+ if name:
+ query = query.filter_by(name=name)
+ if irb_doc_code:
+ query = query.filter_by(irb_doc_code=irb_doc_code)
results = query.all()
return results
@@ -194,7 +206,7 @@ class FileService(object):
@staticmethod
def get_workflow_file_data(workflow, file_name):
- """Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns it's data"""
+ """Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns its data"""
workflow_spec_model = FileService.find_spec_model_in_db(workflow)
if workflow_spec_model is None:
diff --git a/crc/services/ldap_service.py b/crc/services/ldap_service.py
index ea3e155c..d0bb3f7b 100644
--- a/crc/services/ldap_service.py
+++ b/crc/services/ldap_service.py
@@ -8,24 +8,36 @@ from crc.api.common import ApiError
class LdapUserInfo(object):
- def __init__(self, entry):
- self.display_name = entry.displayName.value
- self.given_name = ", ".join(entry.givenName)
- self.email = entry.mail.value
- self.telephone_number= ", ".join(entry.telephoneNumber)
- self.title = ", ".join(entry.title)
- self.department = ", ".join(entry.uvaDisplayDepartment)
- self.affiliation = ", ".join(entry.uvaPersonIAMAffiliation)
- self.sponsor_type = ", ".join(entry.uvaPersonSponsoredType)
-
-
+ def __init__(self):
+ self.display_name = ''
+ self.given_name = ''
+ self.email_address = ''
+ self.telephone_number = ''
+ self.title = ''
+ self.department = ''
+ self.affiliation = ''
+ self.sponsor_type = ''
+ self.uid = ''
+ @classmethod
+ def from_entry(cls, entry):
+ instance = cls()
+ instance.display_name = entry.displayName.value
+ instance.given_name = ", ".join(entry.givenName)
+ instance.email_address = entry.mail.value
+ instance.telephone_number = ", ".join(entry.telephoneNumber)
+ instance.title = ", ".join(entry.title)
+ instance.department = ", ".join(entry.uvaDisplayDepartment)
+ instance.affiliation = ", ".join(entry.uvaPersonIAMAffiliation)
+ instance.sponsor_type = ", ".join(entry.uvaPersonSponsoredType)
+ instance.uid = entry.uid.value
+ return instance
class LdapService(object):
search_base = "ou=People,o=University of Virginia,c=US"
- attributes = ['cn', 'displayName', 'givenName', 'mail', 'objectClass', 'UvaDisplayDepartment',
+ attributes = ['uid', 'cn', 'displayName', 'givenName', 'mail', 'objectClass', 'UvaDisplayDepartment',
'telephoneNumber', 'title', 'uvaPersonIAMAffiliation', 'uvaPersonSponsoredType']
- search_string = "(&(objectclass=person)(uid=%s))"
+ uid_search_string = "(&(objectclass=person)(uid=%s))"
def __init__(self):
if app.config['TESTING']:
@@ -46,9 +58,25 @@ class LdapService(object):
self.conn.unbind()
def user_info(self, uva_uid):
- search_string = LdapService.search_string % uva_uid
+ search_string = LdapService.uid_search_string % uva_uid
self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
if len(self.conn.entries) < 1:
raise ApiError("missing_ldap_record", "Unable to locate a user with id %s in LDAP" % uva_uid)
entry = self.conn.entries[0]
- return(LdapUserInfo(entry))
+ return LdapUserInfo.from_entry(entry)
+
+ def search_users(self, query, limit):
+ search_string = LdapService.uid_search_string % query
+ self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
+
+ # Entries are returned as a generator, accessing entries
+ # can make subsequent calls to the ldap service, so limit
+ # those here.
+ count = 0
+ results = []
+ for entry in self.conn.entries:
+            if count >= limit:
+ break
+ results.append(LdapUserInfo.from_entry(entry))
+ count += 1
+ return results
diff --git a/crc/services/lookup_service.py b/crc/services/lookup_service.py
new file mode 100644
index 00000000..f9d023bc
--- /dev/null
+++ b/crc/services/lookup_service.py
@@ -0,0 +1,143 @@
+from pandas import ExcelFile
+
+from crc import db
+from crc.api.common import ApiError
+from crc.models.api_models import Task
+from crc.models.file import FileDataModel, LookupFileModel, LookupDataModel
+from crc.services.file_service import FileService
+from crc.services.ldap_service import LdapService
+
+
+class LookupService(object):
+
+ """Provides tools for doing lookups for auto-complete fields.
+ This can currently take two forms:
+ 1) Lookup from spreadsheet data associated with a workflow specification.
+ in which case we store the spreadsheet data in a lookup table with full
+ text indexing enabled, and run searches against that table.
+ 2) Lookup from LDAP records. In which case we call out to an external service
+ to pull back detailed records and return them.
+
+ I could imagine this growing to include other external services as tools to handle
+ lookup fields. I could also imagine using some sort of local cache so we don't
+ unnecessarily pound on external services for repeat searches for the same records.
+ """
+
+ @staticmethod
+ def lookup(spiff_task, field, query, limit):
+ """Executes the lookup for the given field."""
+ if field.type != Task.FIELD_TYPE_AUTO_COMPLETE:
+ raise ApiError.from_task("invalid_field_type",
+ "Field '%s' must be an autocomplete field to use lookups." % field.label,
+ task=spiff_task)
+
+ # If this field has an associated options file, then do the lookup against that field.
+ if field.has_property(Task.PROP_OPTIONS_FILE):
+ lookup_table = LookupService.get_lookup_table(spiff_task, field)
+ return LookupService._run_lookup_query(lookup_table, query, limit)
+ # If this is a ldap lookup, use the ldap service to provide the fields to return.
+ elif field.has_property(Task.PROP_LDAP_LOOKUP):
+ return LookupService._run_ldap_query(query, limit)
+ else:
+ raise ApiError.from_task("unknown_lookup_option",
+ "Lookup supports using spreadsheet options or ldap options, and neither was"
+ "provided.")
+
+ @staticmethod
+ def get_lookup_table(spiff_task, field):
+ """ Checks to see if the options are provided in a separate lookup table associated with the
+ workflow, and if so, assures that data exists in the database, and returns a model that can be used
+ to locate that data.
+
+ Returns: a LookupFileModel that can be used to locate the lookup data.
+ """
+ if field.has_property(Task.PROP_OPTIONS_FILE):
+ if not field.has_property(Task.PROP_OPTIONS_VALUE_COLUMN) or \
+ not field.has_property(Task.PROP_OPTIONS_LABEL_COL):
+ raise ApiError.from_task("invalid_emum",
+ "For enumerations based on an xls file, you must include 3 properties: %s, "
+ "%s, and %s" % (Task.PROP_OPTIONS_FILE,
+ Task.PROP_OPTIONS_VALUE_COLUMN,
+ Task.PROP_OPTIONS_LABEL_COL),
+ task=spiff_task)
+
+ # Get the file data from the File Service
+ file_name = field.get_property(Task.PROP_OPTIONS_FILE)
+ value_column = field.get_property(Task.PROP_OPTIONS_VALUE_COLUMN)
+ label_column = field.get_property(Task.PROP_OPTIONS_LABEL_COL)
+ data_model = FileService.get_workflow_file_data(spiff_task.workflow, file_name)
+ lookup_model = LookupService.get_lookup_table_from_data_model(data_model, value_column, label_column)
+ return lookup_model
+
+ @staticmethod
+ def get_lookup_table_from_data_model(data_model: FileDataModel, value_column, label_column):
+ """ In some cases the lookup table can be very large. This method will add all values to the database
+ in a way that can be searched and returned via an api call - rather than sending the full set of
+ options along with the form. It will only open the file and process the options if something has
+ changed. """
+
+ lookup_model = db.session.query(LookupFileModel) \
+ .filter(LookupFileModel.file_data_model_id == data_model.id) \
+ .filter(LookupFileModel.value_column == value_column) \
+ .filter(LookupFileModel.label_column == label_column).first()
+
+ if not lookup_model:
+ xls = ExcelFile(data_model.data)
+ df = xls.parse(xls.sheet_names[0]) # Currently we only look at the first sheet.
+ if value_column not in df:
+ raise ApiError("invalid_emum",
+ "The file %s does not contain a column named % s" % (data_model.file_model.name,
+ value_column))
+ if label_column not in df:
+ raise ApiError("invalid_emum",
+ "The file %s does not contain a column named % s" % (data_model.file_model.name,
+ label_column))
+
+ lookup_model = LookupFileModel(label_column=label_column, value_column=value_column,
+ file_data_model_id=data_model.id)
+
+ db.session.add(lookup_model)
+ for index, row in df.iterrows():
+ lookup_data = LookupDataModel(lookup_file_model=lookup_model,
+ value=row[value_column],
+ label=row[label_column],
+ data=row.to_json())
+ db.session.add(lookup_data)
+ db.session.commit()
+
+ return lookup_model
+
+ @staticmethod
+ def _run_lookup_query(lookup_file_model, query, limit):
+ db_query = LookupDataModel.query.filter(LookupDataModel.lookup_file_model == lookup_file_model)
+
+ query = query.strip()
+ if len(query) > 1:
+ if ' ' in query:
+ terms = query.split(' ')
+ new_terms = []
+ for t in terms:
+ new_terms.append(t + ":*")
+ query = '|'.join(new_terms)
+ else:
+ query = "%s:*" % query
+ db_query = db_query.filter(LookupDataModel.label.match(query))
+
+ # db_query = db_query.filter(text("lookup_data.label @@ to_tsquery('simple', '%s')" % query))
+
+ return db_query.limit(limit).all()
+
+ @staticmethod
+ def _run_ldap_query(query, limit):
+ users = LdapService().search_users(query, limit)
+
+ """Converts the user models into something akin to the
+ LookupModel in models/file.py, so this can be returned in the same way
+ we return a lookup data model."""
+ user_list = []
+ for user in users:
+ user_list.append( {"value": user.uid,
+ "label": user.display_name + " (" + user.uid + ")",
+ "data": user.__dict__
+ })
+ return user_list
\ No newline at end of file
diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py
index 7f06c47b..8170fac4 100644
--- a/crc/services/workflow_processor.py
+++ b/crc/services/workflow_processor.py
@@ -212,7 +212,7 @@ class WorkflowProcessor(object):
return full_version
@staticmethod
- def __get_file_models_for_version(workflow_spec_id, version):
+ def get_file_models_for_version(workflow_spec_id, version):
file_id_strings = re.findall('\((.*)\)', version)[0].split(".")
file_ids = [int(i) for i in file_id_strings]
files = session.query(FileDataModel)\
@@ -237,12 +237,17 @@ class WorkflowProcessor(object):
.all()
@staticmethod
- def get_spec(workflow_spec_id, version):
+ def get_spec(workflow_spec_id, version=None):
"""Returns the requested version of the specification,
- or the lastest version if none is specified."""
+ or the latest version if none is specified."""
parser = WorkflowProcessor.get_parser()
process_id = None
- file_data_models = WorkflowProcessor.__get_file_models_for_version(workflow_spec_id, version)
+
+ if version is None:
+ file_data_models = WorkflowProcessor.__get_latest_file_models(workflow_spec_id)
+ else:
+ file_data_models = WorkflowProcessor.get_file_models_for_version(workflow_spec_id, version)
+
for file_data in file_data_models:
if file_data.file_model.type == FileType.bpmn:
bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
@@ -287,6 +292,10 @@ class WorkflowProcessor(object):
form_data[field.id] = random.randint(1, 1000)
elif field.type == 'boolean':
form_data[field.id] = random.choice([True, False])
+ elif field.type == 'file':
+ form_data[field.id] = random.randint(1, 100)
+ elif field.type == 'files':
+ form_data[field.id] = random.randrange(1, 100)
else:
form_data[field.id] = WorkflowProcessor._random_string()
if task.data is None:
@@ -317,7 +326,8 @@ class WorkflowProcessor(object):
Returns the new version.
"""
version = WorkflowProcessor.get_latest_version_string(self.workflow_spec_id)
- spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
+ spec = WorkflowProcessor.get_spec(self.workflow_spec_id) # Force latest version by NOT specifying version
+ # spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
bpmn_workflow.data = self.bpmn_workflow.data
for task in bpmn_workflow.get_tasks(SpiffTask.READY):
diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py
index faee089a..c5294f1f 100644
--- a/crc/services/workflow_service.py
+++ b/crc/services/workflow_service.py
@@ -1,5 +1,7 @@
from datetime import datetime
+import jinja2
+from SpiffWorkflow import Task as SpiffTask, WorkflowException
from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask
from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask
from SpiffWorkflow.bpmn.specs.UserTask import UserTask
@@ -7,20 +9,16 @@ from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
from SpiffWorkflow.specs import CancelTask, StartTask
from flask import g
-from pandas import ExcelFile
-from sqlalchemy import func
+from jinja2 import Template
from crc import db, app
from crc.api.common import ApiError
from crc.models.api_models import Task, MultiInstanceType
-import jinja2
-from jinja2 import Template
-
-from crc.models.file import FileDataModel, LookupFileModel, LookupDataModel
+from crc.models.file import LookupDataModel
from crc.models.stats import TaskEventModel
from crc.services.file_service import FileService
+from crc.services.lookup_service import LookupService
from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine
-from SpiffWorkflow import Task as SpiffTask, WorkflowException
class WorkflowService(object):
@@ -38,7 +36,7 @@ class WorkflowService(object):
@classmethod
def test_spec(cls, spec_id):
- """Runs a spec through it's paces to see if it results in any errors. Not full proof, but a good
+ """Runs a spec through its paces to see if it results in any errors. Not fool-proof, but a good
sanity check."""
version = WorkflowProcessor.get_latest_version_string(spec_id)
spec = WorkflowProcessor.get_spec(spec_id, version)
@@ -178,10 +176,10 @@ class WorkflowService(object):
@staticmethod
def process_options(spiff_task, field):
- lookup_model = WorkflowService.get_lookup_table(spiff_task, field)
+ lookup_model = LookupService.get_lookup_table(spiff_task, field)
- # If lookup is set to true, do not populate options, a lookup will happen later.
- if field.has_property(Task.EMUM_OPTIONS_AS_LOOKUP) and field.get_property(Task.EMUM_OPTIONS_AS_LOOKUP):
+ # If this is an auto-complete field, do not populate options, a lookup will happen later.
+ if field.type == Task.FIELD_TYPE_AUTO_COMPLETE:
pass
else:
data = db.session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_model).all()
@@ -190,88 +188,6 @@ class WorkflowService(object):
for d in data:
field.options.append({"id": d.value, "name": d.label})
- @staticmethod
- def get_lookup_table(spiff_task, field):
- """ Checks to see if the options are provided in a separate lookup table associated with the
- workflow, and if so, assures that data exists in the database, and return a model than can be used
- to locate that data. """
- if field.has_property(Task.ENUM_OPTIONS_FILE_PROP):
- if not field.has_property(Task.EMUM_OPTIONS_VALUE_COL_PROP) or \
- not field.has_property(Task.EMUM_OPTIONS_LABEL_COL_PROP):
- raise ApiError.from_task("invalid_emum",
- "For enumerations based on an xls file, you must include 3 properties: %s, "
- "%s, and %s" % (Task.ENUM_OPTIONS_FILE_PROP,
- Task.EMUM_OPTIONS_VALUE_COL_PROP,
- Task.EMUM_OPTIONS_LABEL_COL_PROP),
- task=spiff_task)
-
- # Get the file data from the File Service
- file_name = field.get_property(Task.ENUM_OPTIONS_FILE_PROP)
- value_column = field.get_property(Task.EMUM_OPTIONS_VALUE_COL_PROP)
- label_column = field.get_property(Task.EMUM_OPTIONS_LABEL_COL_PROP)
- data_model = FileService.get_workflow_file_data(spiff_task.workflow, file_name)
- lookup_model = WorkflowService._get_lookup_table_from_data_model(data_model, value_column, label_column)
- return lookup_model
-
- @staticmethod
- def _get_lookup_table_from_data_model(data_model: FileDataModel, value_column, label_column):
- """ In some cases the lookup table can be very large. This method will add all values to the database
- in a way that can be searched and returned via an api call - rather than sending the full set of
- options along with the form. It will only open the file and process the options if something has
- changed. """
-
- lookup_model = db.session.query(LookupFileModel) \
- .filter(LookupFileModel.file_data_model_id == data_model.id) \
- .filter(LookupFileModel.value_column == value_column) \
- .filter(LookupFileModel.label_column == label_column).first()
-
- if not lookup_model:
- xls = ExcelFile(data_model.data)
- df = xls.parse(xls.sheet_names[0]) # Currently we only look at the fist sheet.
- if value_column not in df:
- raise ApiError("invalid_emum",
- "The file %s does not contain a column named % s" % (data_model.file_model.name,
- value_column))
- if label_column not in df:
- raise ApiError("invalid_emum",
- "The file %s does not contain a column named % s" % (data_model.file_model.name,
- label_column))
-
- lookup_model = LookupFileModel(label_column=label_column, value_column=value_column,
- file_data_model_id=data_model.id)
-
- db.session.add(lookup_model)
- for index, row in df.iterrows():
- lookup_data = LookupDataModel(lookup_file_model=lookup_model,
- value=row[value_column],
- label=row[label_column],
- data=row.to_json())
- db.session.add(lookup_data)
- db.session.commit()
-
- return lookup_model
-
- @staticmethod
- def run_lookup_query(lookupFileModel, query, limit):
- db_query = LookupDataModel.query.filter(LookupDataModel.lookup_file_model == lookupFileModel)
-
- query = query.strip()
- if len(query) > 1:
- if ' ' in query:
- terms = query.split(' ')
- query = ""
- new_terms = []
- for t in terms:
- new_terms.append(t + ":*")
- query = '|'.join(new_terms)
- else:
- query = "%s:*" % query
- db_query = db_query.filter(LookupDataModel.label.match(query))
-
- # db_query = db_query.filter(text("lookup_data.label @@ to_tsquery('simple', '%s')" % query))
-
- return db_query.limit(limit).all()
-
@staticmethod
def log_task_action(processor, spiff_task, action):
task = WorkflowService.spiff_task_to_api_task(spiff_task)
diff --git a/crc/static/templates/placeholder.docx b/crc/static/templates/placeholder.docx
new file mode 100644
index 00000000..7c27415f
Binary files /dev/null and b/crc/static/templates/placeholder.docx differ
diff --git a/crc/static/templates/placeholder.png b/crc/static/templates/placeholder.png
new file mode 100644
index 00000000..86d8caf7
Binary files /dev/null and b/crc/static/templates/placeholder.png differ
diff --git a/crconnect.wsgi b/crconnect.wsgi
new file mode 100644
index 00000000..069796e4
--- /dev/null
+++ b/crconnect.wsgi
@@ -0,0 +1,15 @@
+python_home = '/usr/local/envs/crcpython3'
+
+import os
+import sys
+
+# Calculate path to site-packages directory.
+
+python_version = '.'.join(map(str, sys.version_info[:2]))
+site_packages = python_home + '/lib/python%s/site-packages' % python_version
+
+# Add the site-packages directory.
+
+site.addsitedir(site_packages)
+
+from crc import app as application
diff --git a/deploy/requirements.txt b/deploy/requirements.txt
new file mode 100644
index 00000000..420a888f
--- /dev/null
+++ b/deploy/requirements.txt
@@ -0,0 +1,90 @@
+alabaster==0.7.12
+alembic==1.4.2
+amqp==2.5.2
+aniso8601==8.0.0
+attrs==19.3.0
+babel==2.8.0
+bcrypt==3.1.7
+beautifulsoup4==4.9.1
+billiard==3.6.3.0
+blinker==1.4
+celery==4.4.2
+certifi==2020.4.5.1
+cffi==1.14.0
+chardet==3.0.4
+click==7.1.2
+clickclick==1.2.2
+commonmark==0.9.1
+configparser==5.0.0
+connexion==2.7.0
+coverage==5.1
+docutils==0.16
+docxtpl==0.9.2
+et-xmlfile==1.0.1
+flask==1.1.2
+flask-bcrypt==0.7.1
+flask-cors==3.0.8
+flask-marshmallow==0.12.0
+flask-migrate==2.5.3
+flask-restful==0.3.8
+flask-sqlalchemy==2.4.1
+flask-sso==0.4.0
+future==0.18.2
+httpretty==1.0.2
+idna==2.9
+imagesize==1.2.0
+importlib-metadata==1.6.0
+inflection==0.4.0
+itsdangerous==1.1.0
+jdcal==1.4.1
+jinja2==2.11.2
+jsonschema==3.2.0
+kombu==4.6.8
+ldap3==2.7
+lxml==4.5.1
+mako==1.1.2
+markupsafe==1.1.1
+marshmallow==3.6.0
+marshmallow-enum==1.5.1
+marshmallow-sqlalchemy==0.23.0
+numpy==1.18.4
+openapi-spec-validator==0.2.8
+openpyxl==3.0.3
+packaging==20.4
+pandas==1.0.3
+psycopg2-binary==2.8.5
+pyasn1==0.4.8
+pycparser==2.20
+pygments==2.6.1
+pyjwt==1.7.1
+pyparsing==2.4.7
+pyrsistent==0.16.0
+python-dateutil==2.8.1
+python-docx==0.8.10
+python-editor==1.0.4
+pytz==2020.1
+pyyaml==5.3.1
+recommonmark==0.6.0
+requests==2.23.0
+six==1.14.0
+snowballstemmer==2.0.0
+soupsieve==2.0.1
+sphinx==3.0.3
+sphinxcontrib-applehelp==1.0.2
+sphinxcontrib-devhelp==1.0.2
+sphinxcontrib-htmlhelp==1.0.3
+sphinxcontrib-jsmath==1.0.1
+sphinxcontrib-qthelp==1.0.3
+sphinxcontrib-serializinghtml==1.1.4
+spiffworkflow
+sqlalchemy==1.3.17
+swagger-ui-bundle==0.0.6
+urllib3==1.25.9
+vine==1.3.0
+waitress==1.4.3
+webob==1.8.6
+webtest==2.0.35
+werkzeug==1.0.1
+xlrd==1.2.0
+xlsxwriter==1.2.8
+zipp==3.1.0
diff --git a/deploy/update_requirements.sh b/deploy/update_requirements.sh
new file mode 100755
index 00000000..adbf1142
--- /dev/null
+++ b/deploy/update_requirements.sh
@@ -0,0 +1,4 @@
+jq -r '.default
+ | to_entries[]
+ | .key + .value.version' \
+ ../Pipfile.lock > requirements.txt
diff --git a/tests/base_test.py b/tests/base_test.py
index 3511b361..290b1506 100644
--- a/tests/base_test.py
+++ b/tests/base_test.py
@@ -77,20 +77,21 @@ class BaseTest(unittest.TestCase):
app.config.from_object('config.testing')
cls.ctx = app.test_request_context()
cls.app = app.test_client()
+ cls.ctx.push()
db.create_all()
@classmethod
def tearDownClass(cls):
- db.drop_all()
+ cls.ctx.pop()
session.remove()
pass
def setUp(self):
- self.ctx.push()
+ pass
def tearDown(self):
- ExampleDataLoader.clean_db() # This does not seem to work, some colision of sessions.
- self.ctx.pop()
+ ExampleDataLoader.clean_db()
+ session.flush()
self.auths = {}
def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
diff --git a/tests/data/enum_options_from_file/enum_options_from_file.bpmn b/tests/data/enum_options_from_file/enum_options_from_file.bpmn
index 8080327f..6497d1f7 100644
--- a/tests/data/enum_options_from_file/enum_options_from_file.bpmn
+++ b/tests/data/enum_options_from_file/enum_options_from_file.bpmn
@@ -1,5 +1,5 @@
-
+
SequenceFlow_0lvudp8
@@ -14,9 +14,9 @@
-
-
-
+
+
+
@@ -27,20 +27,20 @@
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/data/enum_options_with_search/enum_options_with_search.bpmn b/tests/data/enum_options_with_search/enum_options_with_search.bpmn
index 7c682bb8..584dd261 100644
--- a/tests/data/enum_options_with_search/enum_options_with_search.bpmn
+++ b/tests/data/enum_options_with_search/enum_options_with_search.bpmn
@@ -1,5 +1,5 @@
-
+
SequenceFlow_0lvudp8
@@ -14,10 +14,9 @@
@@ -28,20 +27,20 @@
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/data/ldap_lookup/ldap_lookup.bpmn b/tests/data/ldap_lookup/ldap_lookup.bpmn
new file mode 100644
index 00000000..8f89c0d8
--- /dev/null
+++ b/tests/data/ldap_lookup/ldap_lookup.bpmn
@@ -0,0 +1,47 @@
+
+
+
+
+ SequenceFlow_0lvudp8
+
+
+
+ SequenceFlow_02vev7n
+
+
+
+
+
+
+
+
+
+
+
+
+ SequenceFlow_0lvudp8
+ SequenceFlow_02vev7n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/test_authentication.py b/tests/test_authentication.py
index 5a19fb32..2767797a 100644
--- a/tests/test_authentication.py
+++ b/tests/test_authentication.py
@@ -12,7 +12,7 @@ class TestAuthentication(BaseTest):
self.assertTrue(isinstance(auth_token, bytes))
self.assertEqual("dhf8r", user.decode_auth_token(auth_token).get("sub"))
- def test_auth_creates_user(self):
+ def test_backdoor_auth_creates_user(self):
new_uid = 'czn1z';
self.load_example_data()
user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
@@ -37,6 +37,23 @@ class TestAuthentication(BaseTest):
self.assertTrue(rv_2.status_code == 302)
self.assertTrue(str.startswith(rv_2.location, redirect_url))
+ def test_normal_auth_creates_user(self):
+ new_uid = 'lb3dp' # This user is in the test ldap system.
+ self.load_example_data()
+ user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
+ self.assertIsNone(user)
+ redirect_url = 'http://worlds.best.website/admin'
+ headers = dict(Uid=new_uid)
+ rv = self.app.get('login', follow_redirects=False, headers=headers)
+ self.assert_success(rv)
+ user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
+ self.assertIsNotNone(user)
+ self.assertEquals(new_uid, user.uid)
+ self.assertEquals("Laura Barnes", user.display_name)
+ self.assertEquals("lb3dp@virginia.edu", user.email_address)
+ self.assertEquals("E0:Associate Professor of Systems and Information Engineering", user.title)
+
+
def test_current_user_status(self):
self.load_example_data()
rv = self.app.get('/v1.0/user')
diff --git a/tests/test_ldap_service.py b/tests/test_ldap_service.py
index f4a56f47..4be65960 100644
--- a/tests/test_ldap_service.py
+++ b/tests/test_ldap_service.py
@@ -18,9 +18,10 @@ class TestLdapService(BaseTest):
def test_get_single_user(self):
user_info = self.ldap_service.user_info("lb3dp")
self.assertIsNotNone(user_info)
+ self.assertEqual("lb3dp", user_info.uid)
self.assertEqual("Laura Barnes", user_info.display_name)
self.assertEqual("Laura", user_info.given_name)
- self.assertEqual("lb3dp@virginia.edu", user_info.email)
+ self.assertEqual("lb3dp@virginia.edu", user_info.email_address)
self.assertEqual("+1 (434) 924-1723", user_info.telephone_number)
self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user_info.title)
self.assertEqual("E0:EN-Eng Sys and Environment", user_info.department)
diff --git a/tests/test_lookup_service.py b/tests/test_lookup_service.py
new file mode 100644
index 00000000..89be6168
--- /dev/null
+++ b/tests/test_lookup_service.py
@@ -0,0 +1,79 @@
+from crc import session
+from crc.models.file import FileDataModel, FileModel, LookupFileModel, LookupDataModel
+from crc.services.file_service import FileService
+from crc.services.lookup_service import LookupService
+from crc.services.workflow_processor import WorkflowProcessor
+from crc.services.workflow_service import WorkflowService
+from tests.base_test import BaseTest
+
+
+class TestLookupService(BaseTest):
+
+ def test_create_lookup_file_multiple_times_does_not_update_database(self):
+ spec = self.load_test_spec('enum_options_from_file')
+ file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
+ file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
+ LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
+ LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
+ LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
+ lookup_records = session.query(LookupFileModel).all()
+ self.assertIsNotNone(lookup_records)
+ self.assertEqual(1, len(lookup_records))
+ lookup_record = lookup_records[0]
+ lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
+ self.assertEquals(19, len(lookup_data))
+ # Using the same table with a different lookup label or value does create additional records.
+ LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NAME", "CUSTOMER_NUMBER")
+ lookup_records = session.query(LookupFileModel).all()
+ self.assertIsNotNone(lookup_records)
+ self.assertEqual(2, len(lookup_records))
+ FileService.delete_file(file_model.id) ## Assure we can delete the file.
+
+ def test_some_full_text_queries(self):
+ self.load_test_spec('enum_options_from_file')
+ file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
+ self.assertIsNotNone(file_model)
+ file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
+ lookup_table = LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
+
+ results = LookupService._run_lookup_query(lookup_table, "medicines", limit=10)
+ self.assertEquals(1, len(results), "words in the middle of label are detected.")
+ self.assertEquals("The Medicines Company", results[0].label)
+
+ results = LookupService._run_lookup_query(lookup_table, "", limit=10)
+ self.assertEquals(10, len(results), "Blank queries return everything, to the limit")
+
+ results = LookupService._run_lookup_query(lookup_table, "UVA", limit=10)
+ self.assertEquals(1, len(results), "Beginning of label is found.")
+ self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
+
+ results = LookupService._run_lookup_query(lookup_table, "uva", limit=10)
+ self.assertEquals(1, len(results), "case does not matter.")
+ self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
+
+
+
+ results = LookupService._run_lookup_query(lookup_table, "medici", limit=10)
+ self.assertEquals(1, len(results), "partial words are picked up.")
+ self.assertEquals("The Medicines Company", results[0].label)
+
+ results = LookupService._run_lookup_query(lookup_table, "Genetics Savings", limit=10)
+ self.assertEquals(1, len(results), "multiple terms are picked up..")
+ self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+
+ results = LookupService._run_lookup_query(lookup_table, "Genetics Sav", limit=10)
+ self.assertEquals(1, len(results), "prefix queries still work with partial terms")
+ self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+
+ results = LookupService._run_lookup_query(lookup_table, "Gen Sav", limit=10)
+ self.assertEquals(1, len(results), "prefix queries still work with ALL the partial terms")
+ self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+
+ results = LookupService._run_lookup_query(lookup_table, "Inc", limit=10)
+ self.assertEquals(7, len(results), "short terms get multiple correct results.")
+ self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+
+ # Fixme: Stop words are taken into account on the query side, and haven't found a fix yet.
+ #results = WorkflowService.run_lookup_query(lookup_table.id, "in", limit=10)
+ #self.assertEquals(7, len(results), "stop words are not removed.")
+ #self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py
index edbb95d5..7cf3c8a2 100644
--- a/tests/test_tasks_api.py
+++ b/tests/test_tasks_api.py
@@ -336,6 +336,21 @@ class TestTasksApi(BaseTest):
results = json.loads(rv.get_data(as_text=True))
self.assertEqual(5, len(results))
+ def test_lookup_endpoint_for_task_ldap_field_lookup(self):
+ self.load_example_data()
+ workflow = self.create_workflow('ldap_lookup')
+ # get the first form
+ workflow = self.get_workflow_api(workflow)
+ task = workflow.next_task
+ field_id = task.form['fields'][0]['id']
+ # lb3dp is a user record in the mock ldap responses for tests.
+ rv = self.app.get('/v1.0/workflow/%i/task/%s/lookup/%s?query=%s&limit=5' %
+ (workflow.id, task.id, field_id, 'lb3dp'),
+ headers=self.logged_in_headers(),
+ content_type="application/json")
+ self.assert_success(rv)
+ results = json.loads(rv.get_data(as_text=True))
+ self.assertEqual(1, len(results))
def test_sub_process(self):
self.load_example_data()
diff --git a/tests/test_workflow_service.py b/tests/test_workflow_service.py
index d80d84e6..95385b80 100644
--- a/tests/test_workflow_service.py
+++ b/tests/test_workflow_service.py
@@ -1,6 +1,7 @@
from crc import session
from crc.models.file import FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.services.file_service import FileService
+from crc.services.lookup_service import LookupService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
from tests.base_test import BaseTest
@@ -76,7 +77,7 @@ class TestWorkflowService(BaseTest):
spec = self.load_test_spec('enum_options_from_file')
file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
- WorkflowService._get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
+ LookupService.get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
lookup_records = session.query(LookupFileModel).all()
self.assertIsNotNone(lookup_records)
self.assertEqual(1, len(lookup_records))
@@ -101,71 +102,5 @@ class TestWorkflowService(BaseTest):
search_results = LookupDataModel.query.filter(LookupDataModel.label.match("bio:*")).all()
self.assertEquals(2, len(search_results))
- def test_create_lookup_file_multiple_times_does_not_update_database(self):
- spec = self.load_test_spec('enum_options_from_file')
- file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
- file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
- WorkflowService._get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
- WorkflowService._get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
- WorkflowService._get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
- lookup_records = session.query(LookupFileModel).all()
- self.assertIsNotNone(lookup_records)
- self.assertEqual(1, len(lookup_records))
- lookup_record = lookup_records[0]
- lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
- self.assertEquals(19, len(lookup_data))
- # Using the same table with different lookup lable or value, does create additional records.
- WorkflowService._get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NAME", "CUSTOMER_NUMBER")
- lookup_records = session.query(LookupFileModel).all()
- self.assertIsNotNone(lookup_records)
- self.assertEqual(2, len(lookup_records))
- FileService.delete_file(file_model.id) ## Assure we can delete the file.
-
- def test_some_full_text_queries(self):
- self.load_test_spec('enum_options_from_file')
- file_model = session.query(FileModel).filter(FileModel.name == "customer_list.xls").first()
- file_data_model = session.query(FileDataModel).filter(FileDataModel.file_model == file_model).first()
- lookup_table = WorkflowService._get_lookup_table_from_data_model(file_data_model, "CUSTOMER_NUMBER", "CUSTOMER_NAME")
- lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_table).all()
-
- results = WorkflowService.run_lookup_query(lookup_table, "medicines", limit=10)
- self.assertEquals(1, len(results), "words in the middle of label are detected.")
- self.assertEquals("The Medicines Company", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "", limit=10)
- self.assertEquals(10, len(results), "Blank queries return everything, to the limit")
-
- results = WorkflowService.run_lookup_query(lookup_table, "UVA", limit=10)
- self.assertEquals(1, len(results), "Beginning of label is found.")
- self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "uva", limit=10)
- self.assertEquals(1, len(results), "case does not matter.")
- self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "medici", limit=10)
- self.assertEquals(1, len(results), "partial words are picked up.")
- self.assertEquals("The Medicines Company", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "Genetics Savings", limit=10)
- self.assertEquals(1, len(results), "multiple terms are picked up..")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "Genetics Sav", limit=10)
- self.assertEquals(1, len(results), "prefix queries still work with partial terms")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "Gen Sav", limit=10)
- self.assertEquals(1, len(results), "prefix queries still work with ALL the partial terms")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
-
- results = WorkflowService.run_lookup_query(lookup_table, "Inc", limit=10)
- self.assertEquals(7, len(results), "short terms get multiple correct results.")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
-
- # Fixme: Stop words are taken into account on the query side, and haven't found a fix yet.
- #results = WorkflowService.run_lookup_query(lookup_table.id, "in", limit=10)
- #self.assertEquals(7, len(results), "stop words are not removed.")
- #self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)