From 0f312605702d8f9b46c07319436d1f46312fff99 Mon Sep 17 00:00:00 2001 From: Kevin Burnett <18027+burnettk@users.noreply.github.com> Date: Thu, 20 Jun 2024 18:58:00 +0000 Subject: [PATCH] Scrap openai lib (#1782) * scrap openai lib * iterate * bring down temperature and top_p * remove lib --------- Co-authored-by: burnettk --- spiffworkflow-backend/poetry.lock | 145 +----------------- spiffworkflow-backend/pyproject.toml | 1 - .../routes/script_assist_controller.py | 43 +++--- 3 files changed, 24 insertions(+), 165 deletions(-) diff --git a/spiffworkflow-backend/poetry.lock b/spiffworkflow-backend/poetry.lock index 43b3c34df..b7fb245f2 100644 --- a/spiffworkflow-backend/poetry.lock +++ b/spiffworkflow-backend/poetry.lock @@ -58,28 +58,6 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] -[[package]] -name = "anyio" -version = "4.3.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] - [[package]] name = "apscheduler" version = "3.10.4" @@ -789,17 +767,6 @@ files = [ {file = "distlib-0.3.8.tar.gz", 
hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, ] -[[package]] -name = "distro" -version = "1.9.0" -description = "Distro - an OS platform information API" -optional = false -python-versions = ">=3.6" -files = [ - {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, - {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, -] - [[package]] name = "dparse" version = "0.6.4b0" @@ -1148,62 +1115,6 @@ gevent = ["gevent (>=1.4.0)"] setproctitle = ["setproctitle"] tornado = ["tornado (>=0.2)"] -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "1.0.3" -description = "A minimal low-level HTTP client." -optional = false -python-versions = ">=3.8" -files = [ - {file = "httpcore-1.0.3-py3-none-any.whl", hash = "sha256:9a6a501c3099307d9fd76ac244e08503427679b1e81ceb1d922485e2f2462ad2"}, - {file = "httpcore-1.0.3.tar.gz", hash = "sha256:5c0f9546ad17dac4d0772b0808856eb616eb8b48ce94f49ed819fd6982a8a544"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.24.0)"] - -[[package]] -name = "httpx" -version = "0.26.0" -description = "The next generation HTTP client." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "httpx-0.26.0-py3-none-any.whl", hash = "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd"}, - {file = "httpx-0.26.0.tar.gz", hash = "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" -sniffio = "*" - -[package.extras] -brotli = ["brotli", "brotlicffi"] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] - [[package]] name = "identify" version = "2.5.35" @@ -1738,29 +1649,6 @@ signals = ["blinker"] signedtoken = ["cryptography", "pyjwt (>=1.0.0)"] test = ["blinker", "cryptography", "mock", "nose", "pyjwt (>=1.0.0)", "unittest2"] -[[package]] -name = "openai" -version = "1.34.0" -description = "The official Python library for the openai API" -optional = false -python-versions = ">=3.7.1" -files = [ - {file = "openai-1.34.0-py3-none-any.whl", hash = "sha256:018623c2f795424044675c6230fa3bfbf98d9e0aab45d8fd116f2efb2cfb6b7e"}, - {file = "openai-1.34.0.tar.gz", hash = "sha256:95c8e2da4acd6958e626186957d656597613587195abd0fb2527566a93e76770"}, -] - -[package.dependencies] -anyio = ">=3.5.0,<5" -distro = ">=1.7.0,<2" -httpx = ">=0.23.0,<1" -pydantic = ">=1.9.0,<3" -sniffio = "*" -tqdm = ">4" -typing-extensions = ">=4.7,<5" - -[package.extras] -datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] - [[package]] name = "packaging" version = "24.0" @@ -2942,17 +2830,6 @@ files = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] -[[package]] -name = "sniffio" -version = "1.3.0" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = 
"sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, -] - [[package]] name = "SpiffWorkflow" version = "3.0.0" @@ -3139,26 +3016,6 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] -[[package]] -name = "tqdm" -version = "4.66.4" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, - {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] -telegram = ["requests"] - [[package]] name = "typeguard" version = "4.3.0" @@ -3499,4 +3356,4 @@ tests-strict = ["pytest (==4.6.0)", "pytest (==4.6.0)", "pytest (==6.2.5)", "pyt [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "ac1db35c6855a98c67f2fcf1d692d2b9284ef41b33eabc426ca1969417c20e16" +content-hash = "c16eafa40b7b79e06028f17a501d89def17fb32f7f1e26fdc084b899afc9c66b" diff --git a/spiffworkflow-backend/pyproject.toml b/spiffworkflow-backend/pyproject.toml index abaf5eea7..b3eb651f0 100644 --- a/spiffworkflow-backend/pyproject.toml +++ b/spiffworkflow-backend/pyproject.toml @@ -32,7 +32,6 @@ sentry-sdk = {extras = ['flask'], version = "^2.5"} # sphinx-autoapi = "^2.0" psycopg2 = "^2.9.3" typing-extensions = "^4.10.0" -openai = "^1.34.0" spiffworkflow-connector-command = {git = "https://github.com/sartography/spiffworkflow-connector-command.git", rev = "main"} diff --git 
a/spiffworkflow-backend/src/spiffworkflow_backend/routes/script_assist_controller.py b/spiffworkflow-backend/src/spiffworkflow_backend/routes/script_assist_controller.py index d30cce854..d31ff7843 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/routes/script_assist_controller.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/routes/script_assist_controller.py @@ -1,8 +1,8 @@ +import requests from flask import current_app from flask import jsonify from flask import make_response from flask.wrappers import Response -from openai import OpenAI from spiffworkflow_backend.exceptions.api_error import ApiError @@ -34,28 +34,31 @@ def process_message(body: dict) -> Response: no_nonsense_append = ( "Do not include any text other than the complete python script. " "Do not include any lines with comments. " - "Reject any request that does not appear to be for a python script." - "Do not include the word 'OpenAI' in any responses." + "Reject any request that does not appear to be for a python script. " + "Do not include the word 'OpenAI' in any responses. " + "Do not use print statements, but instead assign results to new variables. " ) # Build query, set up OpenAI client, and get response query = no_nonsense_prepend + str(body["query"]) + no_nonsense_append - client = OpenAI(api_key=openai_api_key) + headers = {"Authorization": f"Bearer {openai_api_key}"} - # TODO: Might be good to move Model and maybe other parameters to config - completion = client.chat.completions.create( - messages=[ - { - "role": "user", - "content": query, - } - ], - model="gpt-3.5-turbo", - temperature=1, - max_tokens=256, - top_p=1, - frequency_penalty=0, - presence_penalty=0, - ) + payload = { + # other reasonable options include gpt-4o (more expensive, better) + "model": "gpt-3.5-turbo", + "messages": [{"role": "user", "content": query}], + # temperature controls the randomness of predictions. 
A lower temperature (e.g., 0.5) can help produce more deterministic + # outputs, which is useful for generating precise code. + "temperature": 0.5, + "max_tokens": 256, + # top_p determines the diversity of the model's outputs. A lower value (e.g., 0.5) focuses the model on producing more + # likely tokens, which can be beneficial for generating coherent code snippets. + "top_p": 0.5, + "frequency_penalty": 0, + "presence_penalty": 0, + } + response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, timeout=30) + response.raise_for_status() # Will raise an HTTPError if the HTTP request returned an unsuccessful status code + completion = response.json()["choices"][0]["message"]["content"] - return make_response(jsonify({"result": completion.choices[0].message.content}), 200) + return make_response(jsonify({"result": completion}), 200)