From a1efa6b9e2a90bc52d7da3142d579ca19edc238e Mon Sep 17 00:00:00 2001 From: Daniil <94884910+Filienko@users.noreply.github.com> Date: Fri, 23 Aug 2024 15:43:09 -0700 Subject: [PATCH] Update dependencies (#2) * Updating python to 3.11 * Adding setup.cfg * New setup * Updated libraries, removing pinned * Updating the versions to ones generated by 3.9 pip-compile * Removing pyproject, switching to setup to support older python * Removing version from setup * Fixing formatting * Downgrading flask * Returning to previous version * WIP: Restoring the versions to ones generated by pip-compile * Updating the versions to latest * WIP: trying to free up the space * Deleting unnecessary folder * Not deleting tools directories during tests --- .github/workflows/build-deliver.yaml | 3 + .github/workflows/test.yaml | 2 +- Dockerfile | 2 +- requirements.dev.txt | 197 ++++++++++++++++++++++++--- requirements.txt | 177 +++++++++++++++++++++--- setup.cfg | 46 +++++++ setup.py | 7 + 7 files changed, 393 insertions(+), 41 deletions(-) create mode 100644 setup.cfg create mode 100644 setup.py diff --git a/.github/workflows/build-deliver.yaml b/.github/workflows/build-deliver.yaml index 08a1888..0844fdc 100644 --- a/.github/workflows/build-deliver.yaml +++ b/.github/workflows/build-deliver.yaml @@ -9,6 +9,9 @@ jobs: # needs: [test] runs-on: ubuntu-latest steps: + - name: Delete huge unnecessary tools folder + run: rm -rf /opt/hostedtoolcache + - name: Checkout git commit uses: actions/checkout@master with: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index d4de3c8..8b98690 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -6,7 +6,7 @@ on: [push] jobs: test: runs-on: ubuntu-latest - container: python:3.9 + container: python:3.11 steps: - uses: actions/checkout@v1 diff --git a/Dockerfile b/Dockerfile index a592d53..6b3b1cd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.9 +FROM python:3.11 WORKDIR /opt/app diff 
--git a/requirements.dev.txt b/requirements.dev.txt index b7e7e19..7a0f774 100644 --- a/requirements.dev.txt +++ b/requirements.dev.txt @@ -1,22 +1,181 @@ # -# This file is autogenerated by pip-compile -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # -# pip-compile +# pip-compile --extra=dev --output-file=requirements.dev.txt setup.cfg # ---requirement requirements.txt -attrs==21.2.0 # via pytest -importlib-metadata==4.8.1 # via pluggy, pytest -iniconfig==1.1.1 # via pytest -packaging==21.0 # via pytest -pluggy==1.0.0 # via pytest -psutil==5.8.0 # via mirakuru -py==1.10.0 # via pytest -pyparsing==2.4.7 # via packaging -pytest-datadir==1.3.1 # via pytest-datadir -pytest-mock==3.6.1 # via ml_services (setup.py) -pytest==6.2.5 # via pytest-mock, pytest-redis, ml_services (setup.py) -requests-mock==1.9.3 # via ml_services (setup.py) -toml==0.10.2 # via pytest -typing-extensions==3.10.0.2 # via importlib-metadata -zipp==3.5.0 # via importlib-metadata +attrs==24.2.0 + # via + # cattrs + # requests-cache +blinker==1.8.2 + # via flask +cattrs==23.2.3 + # via requests-cache +certifi==2024.7.4 + # via requests +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via flask +exceptiongroup==1.2.2 + # via + # cattrs + # pytest +filelock==3.15.4 + # via + # huggingface-hub + # torch + # transformers + # triton +flask==3.0.3 + # via ml_services (setup.cfg) +fsspec==2024.6.1 + # via + # huggingface-hub + # torch +gunicorn==23.0.0 + # via ml_services (setup.cfg) +huggingface-hub==0.24.6 + # via + # tokenizers + # transformers +idna==3.7 + # via requests +importlib-metadata==8.4.0 + # via flask +iniconfig==2.0.0 + # via pytest +itsdangerous==2.2.0 + # via flask +jinja2==3.1.4 + # via + # flask + # torch +markupsafe==2.1.5 + # via + # jinja2 + # werkzeug +mpmath==1.3.0 + # via sympy +networkx==3.2.1 + # via torch +numpy==2.0.1 + # via + # ml_services (setup.cfg) + # pandas + # scipy + # transformers 
+nvidia-cublas-cu12==12.1.3.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via torch +nvidia-cuda-runtime-cu12==12.1.105 + # via torch +nvidia-cudnn-cu12==9.1.0.70 + # via torch +nvidia-cufft-cu12==11.0.2.54 + # via torch +nvidia-curand-cu12==10.3.2.106 + # via torch +nvidia-cusolver-cu12==11.4.5.107 + # via torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via torch +nvidia-nvjitlink-cu12==12.6.20 + # via + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via torch +packaging==24.1 + # via + # gunicorn + # huggingface-hub + # pytest + # transformers +pandas==2.2.2 + # via ml_services (setup.cfg) +platformdirs==4.2.2 + # via requests-cache +pluggy==1.5.0 + # via pytest +pytest==8.3.2 + # via + # ml_services (setup.cfg) + # pytest-datadir + # pytest-mock +pytest-datadir==1.5.0 + # via ml_services (setup.cfg) +pytest-mock==3.14.0 + # via ml_services (setup.cfg) +python-dateutil==2.9.0.post0 + # via pandas +pytz==2024.1 + # via pandas +pyyaml==6.0.2 + # via + # huggingface-hub + # transformers +regex==2024.7.24 + # via transformers +requests==2.32.3 + # via + # huggingface-hub + # requests-cache + # requests-mock + # transformers +requests-cache==1.2.1 + # via ml_services (setup.cfg) +requests-mock==1.12.1 + # via ml_services (setup.cfg) +safetensors==0.4.4 + # via transformers +scipy==1.13.1 + # via ml_services (setup.cfg) +six==1.16.0 + # via + # python-dateutil + # url-normalize +sympy==1.13.2 + # via torch +tokenizers==0.19.1 + # via transformers +tomli==2.0.1 + # via pytest +torch==2.4.0 + # via ml_services (setup.cfg) +tqdm==4.66.5 + # via + # huggingface-hub + # transformers +transformers==4.44.1 + # via ml_services (setup.cfg) +triton==3.0.0 + # via torch +typing-extensions==4.12.2 + # via + # cattrs + # huggingface-hub + # torch +tzdata==2024.1 + # via pandas 
+url-normalize==1.4.3 + # via requests-cache +urllib3==2.2.2 + # via + # requests + # requests-cache +werkzeug==3.0.3 + # via flask +zipp==3.20.0 + # via importlib-metadata diff --git a/requirements.txt b/requirements.txt index d1c7db5..7dccccb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,23 +1,160 @@ # -# This file is autogenerated by pip-compile -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # -# pip-compile --output-file=requirements.txt +# pip-compile # -click==8.0.1 # via flask -flask==1.1.2 # via ml_services (setup.py) -gunicorn==20.1.0 # via ml_services (setup.py) -importlib-metadata==4.8.1 # via click -itsdangerous==2.0.1 # via flask -jinja2==3.0.1 # via flask -markupsafe==2.0.1 # via jinja2 -requests-cache==0.6.4 # via ml_services (setup.py) -six==1.16.0 # via ecdsa, flask-cors, url-normalize -typing-extensions==3.10.0.2 # via importlib-metadata -werkzeug==2.0.1 # via flask -zipp==3.5.0 # via importlib-metadata -torch==1.13.0 -transformers==4.24.0 -scipy==1.7.3 -numpy==1.21.5 -pandas==1.3.5 \ No newline at end of file +attrs==24.2.0 + # via + # cattrs + # requests-cache +blinker==1.8.2 + # via flask +cattrs==23.2.3 + # via requests-cache +certifi==2024.7.4 + # via requests +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via flask +exceptiongroup==1.2.2 + # via cattrs +filelock==3.15.4 + # via + # huggingface-hub + # torch + # transformers + # triton +flask==3.0.3 + # via ml_services (setup.cfg) +fsspec==2024.6.1 + # via + # huggingface-hub + # torch +gunicorn==23.0.0 + # via ml_services (setup.cfg) +huggingface-hub==0.24.6 + # via + # tokenizers + # transformers +idna==3.7 + # via requests +importlib-metadata==8.4.0 + # via flask +itsdangerous==2.2.0 + # via flask +jinja2==3.1.4 + # via + # flask + # torch +markupsafe==2.1.5 + # via + # jinja2 + # werkzeug +mpmath==1.3.0 + # via sympy +networkx==3.2.1 + # via torch +numpy==2.0.1 + # via + # ml_services (setup.cfg) + # 
pandas + # scipy + # transformers +nvidia-cublas-cu12==12.1.3.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.1.105 + # via torch +nvidia-cuda-nvrtc-cu12==12.1.105 + # via torch +nvidia-cuda-runtime-cu12==12.1.105 + # via torch +nvidia-cudnn-cu12==9.1.0.70 + # via torch +nvidia-cufft-cu12==11.0.2.54 + # via torch +nvidia-curand-cu12==10.3.2.106 + # via torch +nvidia-cusolver-cu12==11.4.5.107 + # via torch +nvidia-cusparse-cu12==12.1.0.106 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-nccl-cu12==2.20.5 + # via torch +nvidia-nvjitlink-cu12==12.6.20 + # via + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 +nvidia-nvtx-cu12==12.1.105 + # via torch +packaging==24.1 + # via + # gunicorn + # huggingface-hub + # transformers +pandas==2.2.2 + # via ml_services (setup.cfg) +platformdirs==4.2.2 + # via requests-cache +python-dateutil==2.9.0.post0 + # via pandas +pytz==2024.1 + # via pandas +pyyaml==6.0.2 + # via + # huggingface-hub + # transformers +regex==2024.7.24 + # via transformers +requests==2.32.3 + # via + # huggingface-hub + # requests-cache + # transformers +requests-cache==1.2.1 + # via ml_services (setup.cfg) +safetensors==0.4.4 + # via transformers +scipy==1.13.1 + # via ml_services (setup.cfg) +six==1.16.0 + # via + # python-dateutil + # url-normalize +sympy==1.13.2 + # via torch +tokenizers==0.19.1 + # via transformers +torch==2.4.0 + # via ml_services (setup.cfg) +tqdm==4.66.5 + # via + # huggingface-hub + # transformers +transformers==4.44.1 + # via ml_services (setup.cfg) +triton==3.0.0 + # via torch +typing-extensions==4.12.2 + # via + # cattrs + # huggingface-hub + # torch +tzdata==2024.1 + # via pandas +url-normalize==1.4.3 + # via requests-cache +urllib3==2.2.2 + # via + # requests + # requests-cache +werkzeug==3.0.3 + # via flask +zipp==3.20.0 + # via importlib-metadata \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..86544bf --- /dev/null +++ 
b/setup.cfg @@ -0,0 +1,46 @@ +[metadata] +name = isacc-ml +description = Flask-based microservice for NLP machine learning utilities in the ISACC service +author = CIRG +author_email = cirg-adm@uw.edu +url = https://github.com/uwcirg/isacc-ml + +[options] +packages = ml_services +python_requires = >=3.9 +install_requires = + flask + gunicorn + requests-cache + numpy + pandas + scipy + torch + transformers + +[options.extras_require] +dev = + pytest + pytest-mock + pytest-datadir + requests-mock + +[flake8] +max-line-length=100 + +[tool:pytest] +addopts = --color yes --verbose +console_output_style = classic +filterwarnings = + # only print each warning once per module + module + + # Ignore testing-specific warning + ignore:^"localhost" is not a valid cookie domain, it must contain a.*:Warning + + # Ignore warnings on specific dependencies (already reported upstream) + ignore::DeprecationWarning:dogpile + ignore::DeprecationWarning:flask_user + + # error on bad assertions + error:.*assertion is always true.* diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..5da30f3 --- /dev/null +++ b/setup.py @@ -0,0 +1,7 @@ +from setuptools import setup + +setup( + name="ml_services", + packages=["ml_services"], + include_package_data=True +) \ No newline at end of file