diff --git a/.github/actions/deepspeech-v2/Dockerfile b/.github/actions/deepspeech-v2/Dockerfile deleted file mode 100644 index a9b2c13810..0000000000 --- a/.github/actions/deepspeech-v2/Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -# Get base from a pytorch image -FROM pytorch/pytorch:1.5.1-cuda10.1-cudnn7-runtime - -# Set to install things in non-interactive mode -ENV DEBIAN_FRONTEND noninteractive - -# Install system wide softwares -RUN apt-get update \ - && apt-get install -y \ - libgl1-mesa-glx \ - libx11-xcb1 \ - git \ - gcc \ - mono-mcs \ - cmake \ - libavcodec-extra \ - ffmpeg \ - curl \ - && apt-get clean all \ - && rm -r /var/lib/apt/lists/* - -RUN /opt/conda/bin/conda install --yes \ - astropy \ - matplotlib \ - pandas \ - scikit-learn \ - scikit-image - -# Install necessary libraries for deepspeech v2 -RUN pip install torch -RUN pip install tensorflow -RUN pip install torchaudio==0.5.1 - -RUN git clone https://github.com/SeanNaren/warp-ctc.git -RUN cd warp-ctc && mkdir build && cd build && cmake .. && make -RUN cd warp-ctc/pytorch_binding && python setup.py install - -RUN git clone https://github.com/SeanNaren/deepspeech.pytorch.git -RUN cd deepspeech.pytorch && git checkout V2.1 -RUN cd deepspeech.pytorch && pip install -r requirements.txt -RUN cd deepspeech.pytorch && pip install -e . - -RUN pip install numba==0.50.0 -RUN pip install pytest-cov -RUN pip install pydub==0.25.1 diff --git a/.github/actions/deepspeech-v2/action.yml b/.github/actions/deepspeech-v2/action.yml deleted file mode 100644 index fbed446b8b..0000000000 --- a/.github/actions/deepspeech-v2/action.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: 'Test DeepSpeech v2' -description: 'Run tests for DeepSpeech v2' -runs: - using: 'composite' - steps: - - run: $GITHUB_ACTION_PATH/run.sh - shell: bash diff --git a/.github/actions/deepspeech-v2/run.sh b/.github/actions/deepspeech-v2/run.sh deleted file mode 100755 index e8bf57f2e9..0000000000 --- a/.github/actions/deepspeech-v2/run.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -exit_code=0 - -pytest --cov-report=xml --cov=art --cov-append -q -vv tests/estimators/speech_recognition/test_pytorch_deep_speech.py --framework=pytorch --durations=0 -if [[ $? -ne 0 ]]; then exit_code=1; echo "Failed estimators/speech_recognition/test_pytorch_deep_speech tests"; fi -pytest --cov-report=xml --cov=art --cov-append -q -vv tests/attacks/evasion/test_imperceptible_asr_pytorch.py --framework=pytorch --durations=0 -if [[ $? 
-ne 0 ]]; then exit_code=1; echo "Failed attacks/evasion/test_imperceptible_asr_pytorch tests"; fi - -exit ${exit_code} diff --git a/.github/actions/deepspeech-v3/Dockerfile b/.github/actions/deepspeech-v3/Dockerfile index 2b83524703..89ecadb38e 100644 --- a/.github/actions/deepspeech-v3/Dockerfile +++ b/.github/actions/deepspeech-v3/Dockerfile @@ -1,5 +1,5 @@ -# Get base from a pytorch image -FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime +# Get base from a pytorch image +FROM pytorch/pytorch:2.1.1-cuda12.1-cudnn8-runtime # Set to install things in non-interactive mode ENV DEBIAN_FRONTEND noninteractive @@ -17,26 +17,19 @@ RUN apt-get update \ curl \ libsndfile-dev \ libsndfile1 \ + vim \ + curl \ && apt-get clean all \ && rm -r /var/lib/apt/lists/* -RUN /opt/conda/bin/conda install --yes \ - astropy \ - matplotlib \ - pandas \ - scikit-learn \ - scikit-image - # Install necessary libraries for deepspeech v3 -RUN pip install torch -RUN pip install tensorflow -RUN pip install torchaudio==0.6.0 -RUN pip install --no-build-isolation fairscale +RUN pip install --ignore-installed PyYAML torch==2.1.1 tensorflow==2.14.1 torchaudio==2.1.1 pytorch-lightning==2.1.2 scikit-learn==1.3.2 +RUN pip install --no-build-isolation fairscale==0.4.13 RUN git clone https://github.com/SeanNaren/deepspeech.pytorch.git -RUN cd deepspeech.pytorch && pip install -r requirements.txt -RUN cd deepspeech.pytorch && pip install -e . +RUN cd deepspeech.pytorch && sed -i '/^sklearn/d' requirements.txt && pip install -r requirements.txt && pip install -e . + +RUN pip install numba==0.56.4 pytest-cov==4.1.0 pydub==0.25.1 +RUN pip list -RUN pip install numba==0.50.0 -RUN pip install pytest-cov -RUN pip install pydub==0.25.1 +RUN mkdir -p /root/.art/data && cd /root/.art/data && curl -LJO "https://github.com/SeanNaren/deepspeech.pytorch/releases/download/V3.0/librispeech_pretrained_v3.ckpt" diff --git a/.github/workflows/ci-deepspeech-v2.yml b/.github/workflows/ci-deepspeech-v2.yml deleted file mode 100644 index ec8a5c78e0..0000000000 --- a/.github/workflows/ci-deepspeech-v2.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: CI PyTorchDeepSpeech v2 -on: - # Run on manual trigger - workflow_dispatch: - - # Run on pull requests - pull_request: - paths-ignore: - - '*.md' - - # Run on merge queue - merge_group: - - # Run when pushing to main or dev branches - push: - branches: - - main - - dev* - - # Run scheduled CI flow daily - schedule: - - cron: '0 8 * * 0' - -jobs: - test_deepspeech_v2: - name: PyTorchDeepSpeech v2 - runs-on: ubuntu-latest - container: adversarialrobustnesstoolbox/art_testing_envs:deepspeech_v2 - steps: - - name: Checkout Repo - uses: actions/checkout@v3 - - name: Run Test Action - uses: ./.github/actions/deepspeech-v2 - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - fail_ci_if_error: true diff --git a/.github/workflows/ci-deepspeech-v3.yml b/.github/workflows/ci-deepspeech-v3.yml index ff2bd88666..2ea4ecefd1 100644 --- a/.github/workflows/ci-deepspeech-v3.yml +++ b/.github/workflows/ci-deepspeech-v3.yml @@ -23,9 +23,9 @@ on: jobs: test_deepspeech_v3_torch_1_10: - name: PyTorchDeepSpeech v3 / PyTorch 1.10 + name: PyTorchDeepSpeech v3 / PyTorch 2.1.1 runs-on: ubuntu-latest - container: adversarialrobustnesstoolbox/art_testing_envs:deepspeech_v3_torch_1_10 + container: adversarialrobustnesstoolbox/art_testing_envs:deepspeech_v3_torch_2_1_1 steps: - name: Checkout Repo uses: actions/checkout@v3 diff --git a/.github/workflows/ci-huggingface.yml b/.github/workflows/ci-huggingface.yml
index ed3056ad06..bb1f9b6dd0 100644 --- a/.github/workflows/ci-huggingface.yml +++ b/.github/workflows/ci-huggingface.yml @@ -41,7 +41,7 @@ jobs: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies @@ -50,8 +50,8 @@ sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel pip3 install -r requirements_test.txt - pip install tensorflow==2.10.1 - pip install keras==2.10.0 + pip install tensorflow==2.14.0 + pip install keras==2.14.0 pip install torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchvision==${{ matrix.torchvision }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchaudio==${{ matrix.torchaudio }} -f https://download.pytorch.org/whl/cpu/torch_stable.html diff --git a/.github/workflows/ci-keras.yml b/.github/workflows/ci-keras.yml index 2ee6c1028e..2a05278c99 100644 --- a/.github/workflows/ci-keras.yml +++ b/.github/workflows/ci-keras.yml @@ -28,37 +28,31 @@ jobs: fail-fast: false matrix: include: - - name: Keras 2.9.0 (TensorFlow 2.9.2 Python 3.9) + - name: Keras 2.13.1 (TensorFlow 2.13.1 Python 3.10) framework: keras - python: 3.9 - tensorflow: 2.9.2 - keras: 2.9.0 - tf_addons: 0.17.0 - - name: TensorFlow-Keras 2.9.2 (Keras 2.9.0 Python 3.9) - framework: kerastf - python: 3.9 - tensorflow: 2.9.2 - keras: 2.9.0 - tf_addons: 0.17.0 - - name: Keras 2.10.0 (TensorFlow 2.10.1 Python 3.9) + python: '3.10' + tensorflow: 2.13.1 + keras: 2.13.1 + tf_addons: 0.19.0 + - name: Keras 2.14.0 (TensorFlow 2.14.0 Python 3.10) framework: keras - python: 3.9 - tensorflow: 2.10.1 - keras: 2.10.0 - tf_addons: 0.18.0 - - name: TensorFlow-Keras 2.10.1 (Keras 2.10.0 Python 3.9) + python: '3.10' + tensorflow: 2.14.0 + keras: 2.14.0 + tf_addons: 0.20.0 + - name: TensorFlow-Keras 2.14.0 (Keras 2.14.0 Python 3.10) framework: kerastf - python: 3.9 - tensorflow: 2.10.1 - keras: 2.10.0 - tf_addons: 0.18.0 + python: '3.10' + tensorflow: 2.14.0 + keras: 2.14.0 + tf_addons: 0.20.0 name: ${{ matrix.name }} steps: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies @@ -66,7 +60,7 @@ sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -r requirements_test.txt + pip install -q -r <(sed '/^tensorflow/d;/^keras/d;/^tensorflow-addons/d' requirements_test.txt) pip install tensorflow==${{ matrix.tensorflow }} pip install keras==${{ matrix.keras }} pip install tensorflow-addons==${{ matrix.tf_addons }} diff --git a/.github/workflows/ci-legacy.yml b/.github/workflows/ci-legacy.yml index 775d9b9d54..191178750d 100644 --- a/.github/workflows/ci-legacy.yml +++ b/.github/workflows/ci-legacy.yml @@ -29,11 +29,11 @@ jobs: matrix: module: [attacks_1, attacks_2, estimators, defences, metrics, art] include: - - name: legacy (TensorFlow 2.10.1 Keras 2.10.0 PyTorch 1.13.1 scikit-learn 1.1.3 Python 3.9) + - name: legacy (TensorFlow 2.14.0 Keras 2.14.0 PyTorch 1.13.1 scikit-learn 1.1.3 Python 3.10) framework: legacy - python: 3.9 - tensorflow: 2.10.1 - keras: 2.10.0 + python: '3.10' + tensorflow: 2.14.0 + keras: 2.14.0 torch: 1.13.1+cpu torchvision: 0.14.1+cpu torchaudio: 0.13.1+cpu @@ -44,7 +44,7 @@ - name: Checkout
Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 631f3f539f..69304e6bed 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -41,7 +41,7 @@ jobs: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Pre-install Lingvo ASR @@ -76,7 +76,7 @@ jobs: pip install tqdm==4.64.1 pip list - name: Run ${{ matrix.name }} Tests - run: ./run_tests.sh ${{ matrix.framework }} + run: pytest --cov-report=xml --cov=art --cov-append -q -vv tests/estimators/speech_recognition/test_tensorflow_lingvo.py --framework=${{ matrix.framework }} --durations=0 - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 with: diff --git a/.github/workflows/ci-mxnet.yml b/.github/workflows/ci-mxnet.yml index 96a6fe0784..0567fc8c45 100644 --- a/.github/workflows/ci-mxnet.yml +++ b/.github/workflows/ci-mxnet.yml @@ -28,16 +28,16 @@ jobs: fail-fast: false matrix: include: - - name: mxnet (Python 3.8) + - name: mxnet (Python 3.9) framework: mxnet - python: 3.8 + python: 3.9 name: Run ${{ matrix.name }} Tests steps: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies @@ -45,8 +45,6 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install tensorflow==2.4.1 - pip install keras==2.4.3 pip3 install -q -r requirements_test.txt pip list - name: Run ${{ matrix.name }} ${{ matrix.module }} Tests diff --git a/.github/workflows/ci-pytorch-object-detectors.yml b/.github/workflows/ci-pytorch-object-detectors.yml index 049efc7cb7..bb9037b45f 100644 --- a/.github/workflows/ci-pytorch-object-detectors.yml +++ b/.github/workflows/ci-pytorch-object-detectors.yml @@ -31,7 +31,7 @@ jobs: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.10' - name: Install Dependencies diff --git a/.github/workflows/ci-pytorch.yml b/.github/workflows/ci-pytorch.yml index d162dfdcbd..2f8d9a4810 100644 --- a/.github/workflows/ci-pytorch.yml +++ b/.github/workflows/ci-pytorch.yml @@ -28,37 +28,25 @@ jobs: fail-fast: false matrix: include: - - name: PyTorch 1.11.0 (Python 3.9) - framework: pytorch - python: 3.8 - torch: 1.11.0+cpu - torchvision: 0.12.0+cpu - torchaudio: 0.11.0 - - name: PyTorch 1.12.1 (Python 3.9) - framework: pytorch - python: 3.8 - torch: 1.12.1+cpu - torchvision: 0.13.1+cpu - torchaudio: 0.12.1 - - name: PyTorch 1.13.1 (Python 3.9) - framework: pytorch - python: 3.9 - torch: 1.13.1+cpu - torchvision: 0.14.1+cpu - torchaudio: 0.13.1 - name: PyTorch 1.13.1 (Python 3.10) framework: pytorch python: '3.10' torch: 1.13.1+cpu torchvision: 0.14.1+cpu torchaudio: 0.13.1 + - name: PyTorch 2.1.2 (Python 3.10) + framework: pytorch + python: '3.10' + torch: 2.1.2 + torchvision: 0.16.2+cpu + torchaudio: 2.1.2 name: ${{ matrix.name }} steps: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install 
Dependencies @@ -67,8 +55,6 @@ jobs: sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel pip3 install -r requirements_test.txt - pip install tensorflow==2.10.1 - pip install keras==2.10.0 pip install torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchvision==${{ matrix.torchvision }} -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchaudio==${{ matrix.torchaudio }} -f https://download.pytorch.org/whl/cpu/torch_stable.html diff --git a/.github/workflows/ci-scikit-learn.yml b/.github/workflows/ci-scikit-learn.yml index 722b8b4b88..562749106b 100644 --- a/.github/workflows/ci-scikit-learn.yml +++ b/.github/workflows/ci-scikit-learn.yml @@ -50,7 +50,7 @@ jobs: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies @@ -59,8 +59,6 @@ jobs: sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel pip install -r requirements_test.txt - pip install tensorflow==2.10.1 - pip install keras==2.10.0 pip install scikit-learn==${{ matrix.scikit-learn }} pip list - name: Run Tests diff --git a/.github/workflows/ci-style-checks.yml b/.github/workflows/ci-style-checks.yml index c8283c8b9d..3e2a9e129c 100644 --- a/.github/workflows/ci-style-checks.yml +++ b/.github/workflows/ci-style-checks.yml @@ -29,9 +29,9 @@ jobs: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: '3.10' - name: Pre-install run: | sudo apt-get update @@ -39,16 +39,16 @@ jobs: - name: Install Dependencies run: | python -m pip install --upgrade pip setuptools wheel - pip install -q pylint==2.12.2 mypy==0.931 pycodestyle==2.8.0 black==21.12b0 - pip install -q -r requirements_test.txt + pip install -q pylint==2.12.2 mypy==1.7.1 pycodestyle==2.8.0 black==21.12b0 + pip install -q -r <(sed '/^numpy/d;/^pluggy/d;/^tensorflow/d;/^keras/d' requirements_test.txt) + pip install numpy==1.22.4 pip install pluggy==0.13.1 - pip install tensorflow==2.7.0 - pip install keras==2.7.0 + pip install tensorflow==2.13.1 + pip install keras==2.13.1 pip install types-six pip install types-PyYAML pip install types-setuptools pip install click==8.0.2 - pip install numpy==1.21.6 pip list - name: pycodestyle run: pycodestyle --ignore=C0330,C0415,E203,E231,W503 --max-line-length=120 art diff --git a/.github/workflows/ci-tensorflow-v1.yml b/.github/workflows/ci-tensorflow-v1.yml index e573824d7f..aa2d1390b7 100644 --- a/.github/workflows/ci-tensorflow-v1.yml +++ b/.github/workflows/ci-tensorflow-v1.yml @@ -40,7 +40,7 @@ jobs: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies @@ -48,14 +48,20 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^jax/d' requirements_test.txt) + pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d' requirements_test.txt) pip install pandas==1.3.5 pip install scipy==1.7.2 pip install matplotlib==3.5.3 pip 
install xgboost==1.6.2 + pip install protobuf==3.20.1 pip install tensorflow==${{ matrix.tensorflow }} pip install keras==${{ matrix.keras }} - pip install jax[cpu]==0.3.25 + pip install numpy==1.20 + pip install torch==1.13.1 + pip install torchaudio==0.13.1 + pip install torchvision==0.14.1+cpu + pip install Pillow==9.5.0 + pip install h5py==3.8.0 pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/.github/workflows/ci-tensorflow-v2.yml b/.github/workflows/ci-tensorflow-v2.yml index 1634d8ebe4..a0d812b24b 100644 --- a/.github/workflows/ci-tensorflow-v2.yml +++ b/.github/workflows/ci-tensorflow-v2.yml @@ -28,41 +28,34 @@ jobs: fail-fast: false matrix: include: - - name: TensorFlow 2.9.2 (Keras 2.9.0 Python 3.9) + - name: TensorFlow 2.13.1 (Keras 2.13.1 Python 3.10) framework: tensorflow - python: 3.9 - tensorflow: 2.9.2 + python: '3.10' + tensorflow: 2.13.1 tf_version: v2 - keras: 2.9.0 - tf_addons: 0.17.1 - - name: TensorFlow 2.10.1v1 (Keras 2.10.0 Python 3.9) + keras: 2.13.1 + tf_addons: 0.21.0 + - name: TensorFlow 2.14.0v1 (Keras 2.14.0 Python 3.10) framework: tensorflow2v1 - python: 3.9 - tensorflow: 2.10.1 - tf_version: v2 - keras: 2.10.0 - tf_addons: 0.18.0 - - name: TensorFlow 2.10.1 (Keras 2.10.0 Python 3.9) - framework: tensorflow - python: 3.9 - tensorflow: 2.10.1 + python: '3.10' + tensorflow: 2.14.0 tf_version: v2 - keras: 2.10.0 - tf_addons: 0.18.0 - - name: TensorFlow 2.10.1 (Keras 2.10.0 Python 3.10) + keras: 2.14.0 + tf_addons: 0.21.0 + - name: TensorFlow 2.14.0 (Keras 2.14.0 Python 3.10) framework: tensorflow python: '3.10' - tensorflow: 2.10.1 + tensorflow: 2.14.0 tf_version: v2 - keras: 2.10.0 - tf_addons: 0.18.0 + keras: 2.14.0 + tf_addons: 0.21.0 name: ${{ matrix.name }} steps: - name: Checkout Repo uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install Dependencies @@ -70,7 +63,7 @@ sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -r requirements_test.txt + pip install -q -r <(sed '/^tensorflow/d;/^keras/d;/^tensorflow-addons/d' requirements_test.txt) pip install tensorflow==${{ matrix.tensorflow }} pip install keras==${{ matrix.keras }} pip install tensorflow-addons==${{ matrix.tf_addons }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 91066aeb9c..09fd699ad4 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -36,7 +36,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -48,7 +48,7 @@ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl @@ -62,4 +62,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/dockerhub.yml b/.github/workflows/dockerhub.yml index 6bd587df59..122f5a366f 100644 --- a/.github/workflows/dockerhub.yml +++ b/.github/workflows/dockerhub.yml @@ -31,7 +31,7 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 + uses: docker/metadata-action@9dc751fe249ad99385a2583ee0d084c400eee04e with: images: adversarialrobustnesstoolbox/releases tags: | @@ -39,7 +39,7 @@ jobs: type=semver,pattern={{version}} - name: Build and push Docker image - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 with: context: . push: true diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py index 639c64bb94..f6af29d8ce 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py @@ -251,7 +251,11 @@ def generate( # type: ignore return self.patch, self._get_circular_patch_mask() def apply_patch( - self, x: np.ndarray, scale: float, patch_external: np.ndarray = None, mask: Optional[np.ndarray] = None + self, + x: np.ndarray, + scale: float, + patch_external: Optional[np.ndarray] = None, + mask: Optional[np.ndarray] = None, ) -> np.ndarray: """ A function to apply the learned adversarial patch to images or videos. diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py index 796a51dfad..a9a3f0bf6c 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py @@ -25,7 +25,7 @@ import logging import math -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, Optional, Tuple, Union, TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -270,15 +270,15 @@ def _get_circular_patch_mask(self, nb_samples: int, sharpness: int = 40) -> "tor y = np.linspace(-1, 1, diameter) x_grid, y_grid = np.meshgrid(x, y, sparse=True) z_grid = (x_grid ** 2 + y_grid ** 2) ** sharpness - image_mask = 1 - np.clip(z_grid, -1, 1) + image_mask: Union[int, np.ndarray[Any, np.dtype[Any]]] = 1 - np.clip(z_grid, -1, 1) elif self.patch_type == "square": image_mask = np.ones((diameter, diameter)) image_mask = np.expand_dims(image_mask, axis=0) image_mask = np.broadcast_to(image_mask, self.patch_shape) - image_mask = torch.Tensor(np.array(image_mask)).to(self.estimator.device) - image_mask = torch.stack([image_mask] * nb_samples, dim=0) - return image_mask + image_mask_tensor = torch.Tensor(np.array(image_mask)).to(self.estimator.device) + image_mask_tensor = torch.stack([image_mask_tensor] * nb_samples, dim=0) + return image_mask_tensor def _random_overlay( self, diff --git a/art/attacks/evasion/boundary.py b/art/attacks/evasion/boundary.py index 401bf41761..2738747ba4 100644 --- a/art/attacks/evasion/boundary.py +++ b/art/attacks/evasion/boundary.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import List, Optional, Tuple, 
TYPE_CHECKING import numpy as np from tqdm.auto import tqdm, trange @@ -268,14 +268,14 @@ def _attack( for _ in trange(self.max_iter, desc="Boundary attack - iterations", disable=not self.verbose): # Trust region method to adjust delta for _ in range(self.num_trial): - potential_advs = [] + potential_advs_list: List[np.ndarray] = [] for _ in range(self.sample_size): potential_adv = x_adv + self._orthogonal_perturb(self.curr_delta, x_adv, original_sample) potential_adv = np.clip(potential_adv, clip_min, clip_max) - potential_advs.append(potential_adv) + potential_advs_list.append(potential_adv) preds = np.argmax( - self.estimator.predict(np.array(potential_advs), batch_size=self.batch_size), + self.estimator.predict(np.array(potential_advs_list), batch_size=self.batch_size), axis=1, ) @@ -292,7 +292,7 @@ def _attack( self.curr_delta /= self.step_adapt if delta_ratio > 0: - x_advs = np.array(potential_advs)[np.where(satisfied)[0]] + x_advs = np.array(potential_advs_list)[np.where(satisfied)[0]] break else: # pragma: no cover logger.warning("Adversarial example found but not optimal.") diff --git a/art/attacks/evasion/brendel_bethge.py b/art/attacks/evasion/brendel_bethge.py index 992f596956..16c795cac1 100644 --- a/art/attacks/evasion/brendel_bethge.py +++ b/art/attacks/evasion/brendel_bethge.py @@ -2378,7 +2378,7 @@ def generate( return best_advs.astype(config.ART_NUMPY_DTYPE) def norms(self, x: np.ndarray) -> np.ndarray: - order = self.norm if self.norm != "inf" else np.inf + order = float(self.norm) if self.norm != "inf" else np.inf norm = np.linalg.norm(x=x.reshape(x.shape[0], -1), ord=order, axis=1) return norm @@ -2542,7 +2542,7 @@ def _binary_search( """ # First set upper and lower bounds as well as the threshold for the binary search if norm == 2: - (upper_bound, lower_bound) = (1, 0) + (upper_bound, lower_bound) = (np.array(1.0), np.array(0.0)) if threshold is None: threshold = self.theta @@ -2550,7 +2550,7 @@ def _binary_search( else: (upper_bound, lower_bound) = ( np.max(abs(original_sample - current_sample)), - 0, + np.array(0.0), ) if threshold is None: @@ -2580,7 +2580,7 @@ def _binary_search( result = self._interpolate( current_sample=current_sample, original_sample=original_sample, - alpha=upper_bound, + alpha=float(upper_bound), norm=norm, ) diff --git a/art/attacks/evasion/carlini.py b/art/attacks/evasion/carlini.py index c9bfca01f4..383c8a4aeb 100644 --- a/art/attacks/evasion/carlini.py +++ b/art/attacks/evasion/carlini.py @@ -136,7 +136,7 @@ def __init__( self._tanh_smoother = 0.999999 def _loss( - self, x: np.ndarray, x_adv: np.ndarray, target: np.ndarray, c_weight: float + self, x: np.ndarray, x_adv: np.ndarray, target: np.ndarray, c_weight: np.ndarray ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Compute the objective function value. diff --git a/art/attacks/evasion/dpatch.py b/art/attacks/evasion/dpatch.py index 20923f58de..52a3b9979c 100644 --- a/art/attacks/evasion/dpatch.py +++ b/art/attacks/evasion/dpatch.py @@ -264,7 +264,7 @@ def _augment_images_with_patch( random_location: bool, channels_first: bool, mask: Optional[np.ndarray] = None, - transforms: List[Dict[str, int]] = None, + transforms: Optional[List[Dict[str, int]]] = None, ) -> Tuple[np.ndarray, List[Dict[str, int]]]: """ Augment images with patch. 
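Note: a recurring pattern in this diff is rewriting parameters declared as `param: np.ndarray = None` into explicit `Optional[np.ndarray] = None` (apply_patch and _augment_images_with_patch above, shadow_models and others below). The mypy bump elsewhere in this diff explains it: since mypy 0.990, `no_implicit_optional` is enabled by default, so a `None` default no longer implicitly widens the annotation. A minimal sketch under that assumption, with illustrative names that are not part of the ART codebase:

# Minimal sketch of the implicit-vs-explicit Optional fix (illustrative names).
from typing import Optional

import numpy as np


def scale_implicit(x: np.ndarray, mask: np.ndarray = None) -> np.ndarray:
    # Rejected by mypy >= 0.990 (no_implicit_optional is now the default):
    # the annotation np.ndarray does not admit the None default.
    return x if mask is None else x * mask


def scale_explicit(x: np.ndarray, mask: Optional[np.ndarray] = None) -> np.ndarray:
    # Explicit Optional keeps the same runtime behaviour; the declared
    # type now admits None, so callers and the body must handle it.
    return x if mask is None else x * mask

Runtime behaviour of the two functions is identical; only the declared types change, which is why these hunks touch no logic.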
diff --git a/art/attacks/evasion/dpatch_robust.py b/art/attacks/evasion/dpatch_robust.py index 9ad4bf611e..b7c42c5a78 100644 --- a/art/attacks/evasion/dpatch_robust.py +++ b/art/attacks/evasion/dpatch_robust.py @@ -409,8 +409,8 @@ def _untransform_gradients( gradients = transforms["brightness"] * gradients # Undo rotations: - rot90 = (4 - transforms["rot90"]) % 4 - gradients = np.rot90(gradients, rot90, (1, 2)) + rot90 = int((4 - transforms["rot90"]) % 4) + gradients = np.rot90(gradients, k=rot90, axes=(1, 2)) # Account for cropping when considering the upper left point of the patch: x_1 = self.patch_location[0] - int(transforms["crop_x"]) diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 85491c2630..637d19f010 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -159,14 +159,17 @@ def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray) # Get current predictions active_indices = np.arange(len(batch)) + current_eps: Union[int, float, np.ndarray] + partial_stop_condition: Union[bool, np.ndarray] + if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: current_eps = self.eps_step[batch_index_1:batch_index_2] - partial_stop_condition = (current_eps <= self.eps[batch_index_1:batch_index_2]).all() + partial_stop_condition = bool((current_eps <= self.eps[batch_index_1:batch_index_2]).all()) else: current_eps = self.eps_step - partial_stop_condition = (current_eps <= self.eps).all() + partial_stop_condition = bool((current_eps <= self.eps).all()) else: current_eps = self.eps_step @@ -190,11 +193,11 @@ def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray) if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: current_eps = current_eps + self.eps_step[batch_index_1:batch_index_2] - partial_stop_condition = (current_eps <= self.eps[batch_index_1:batch_index_2]).all() + partial_stop_condition = bool((current_eps <= self.eps[batch_index_1:batch_index_2]).all()) else: current_eps = current_eps + self.eps_step - partial_stop_condition = (current_eps <= self.eps).all() + partial_stop_condition = bool((current_eps <= self.eps).all()) else: current_eps = current_eps + self.eps_step @@ -539,6 +542,9 @@ def _compute( # Get perturbation perturbation = self._compute_perturbation(batch, batch_labels, mask_batch, decay, momentum) + batch_eps: Union[int, float, np.ndarray] + batch_eps_step: Union[int, float, np.ndarray] + # Compute batch_eps and batch_eps_step if isinstance(eps, np.ndarray) and isinstance(eps_step, np.ndarray): if len(eps.shape) == len(x.shape) and eps.shape[0] == x.shape[0]: diff --git a/art/attacks/evasion/hop_skip_jump.py b/art/attacks/evasion/hop_skip_jump.py index 3a06ba6be8..383f366a87 100644 --- a/art/attacks/evasion/hop_skip_jump.py +++ b/art/attacks/evasion/hop_skip_jump.py @@ -485,7 +485,7 @@ def _binary_search( """ # First set upper and lower bounds as well as the threshold for the binary search if norm == 2: - (upper_bound, lower_bound) = (1, 0) + (upper_bound, lower_bound) = (np.array(1.0), np.array(0.0)) if threshold is None: threshold = self.theta @@ -493,7 +493,7 @@ def _binary_search( else: (upper_bound, lower_bound) = ( np.max(abs(original_sample - current_sample)), - 0, + np.array(0.0), ) if threshold is None: @@ -523,7 +523,7 @@ def 
_binary_search( result = self._interpolate( current_sample=current_sample, original_sample=original_sample, - alpha=upper_bound, + alpha=float(upper_bound), norm=norm, ) diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py index c03f84c1c6..0d933dd716 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py @@ -540,14 +540,17 @@ def _approximate_power_spectral_density_torch( # compute short-time Fourier transform (STFT) # pylint: disable=W0212 - stft_matrix = torch.stft( - perturbation, - n_fft=self._window_size, - hop_length=self._hop_size, - win_length=self._window_size, - center=False, - window=torch.hann_window(self._window_size).to(self.estimator._device), - ).to(self.estimator._device) + stft_matrix = torch.view_as_real( + torch.stft( + perturbation, + n_fft=self._window_size, + hop_length=self._hop_size, + win_length=self._window_size, + center=False, + window=torch.hann_window(self._window_size).to(self.estimator._device), + return_complex=True, + ).to(self.estimator._device) + ) # compute power spectral density (PSD) # note: fixes implementation of Qin et al. by also considering the square root of gain_factor diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py index eeaf5432fe..cdb03fbbe2 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py @@ -399,7 +399,10 @@ class only supports targeted attack. loss.backward() # Get sign of the gradients - self.global_optimal_delta.grad = torch.sign(self.global_optimal_delta.grad) + if self.global_optimal_delta.grad is not None: + self.global_optimal_delta.grad = torch.sign(self.global_optimal_delta.grad) + else: + raise ValueError("Received None instead of gradient tensor.") # Do optimization self.optimizer_1.step() @@ -747,14 +750,17 @@ def _psd_transform(self, delta: "torch.Tensor", original_max_psd: np.ndarray) -> window_fn = torch.hann_window # type: ignore # Return STFT of delta - delta_stft = torch.stft( - delta, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - center=False, - window=window_fn(self.win_length).to(self.estimator.device), - ).to(self.estimator.device) + delta_stft = torch.view_as_real( + torch.stft( + delta, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + center=False, + window=window_fn(self.win_length).to(self.estimator.device), + return_complex=True, + ).to(self.estimator.device) + ) # Take abs of complex STFT results transformed_delta = torch.sqrt(torch.sum(torch.square(delta_stft), -1)) diff --git a/art/attacks/evasion/lowprofool.py b/art/attacks/evasion/lowprofool.py index 9fd048ef09..c1b298e0bb 100644 --- a/art/attacks/evasion/lowprofool.py +++ b/art/attacks/evasion/lowprofool.py @@ -141,9 +141,8 @@ def __weighted_lp_norm(self, perturbations: np.ndarray) -> np.ndarray: :param perturbations: Perturbations of samples towards being adversarial. :return: Array with weighted Lp-norm of perturbations. 
""" - return self.lambd * np.linalg.norm( - self.importance_vec * perturbations, axis=1, ord=(np.inf if self.norm == "inf" else self.norm) - ).reshape(-1, 1) + order: Union[int, float] = np.inf if self.norm == "inf" else float(self.norm) + return self.lambd * np.linalg.norm(self.importance_vec * perturbations, axis=1, ord=order).reshape(-1, 1) def __weighted_lp_norm_gradient(self, perturbations: np.ndarray) -> np.ndarray: """ diff --git a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py index 22288b62b5..d61f8bb4ac 100644 --- a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py +++ b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py @@ -296,7 +296,10 @@ def _get_loss_gradients(self, x: "torch.Tensor", y: "torch.Tensor", perturbation # Compute gradients loss.backward() grads = eps.grad - grads_batch.append(grads[0, ...]) + if grads is not None: + grads_batch.append(grads[0, ...]) + else: + raise ValueError("Received None instead of gradient tensor.") grads_batch_tensor = torch.stack(grads_batch) diff --git a/art/attacks/evasion/pe_malware_attack.py b/art/attacks/evasion/pe_malware_attack.py index 2c5f9f1d5f..ca3f839804 100644 --- a/art/attacks/evasion/pe_malware_attack.py +++ b/art/attacks/evasion/pe_malware_attack.py @@ -816,7 +816,7 @@ def get_dos_locations(x: np.ndarray) -> Tuple[List[List[int]], List[List[int]]]: size.append(int(0x3C) - mz_offset) start.append(mz_offset) - size.append(pointer_to_pe_header - int(0x40) - 1) + size.append(int(pointer_to_pe_header) - int(0x40) - 1) start.append(int(0x40)) batch_of_starts.append(start) diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index da5ec6fc29..557d8e25de 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -198,6 +198,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size + batch_eps: Union[int, float, np.ndarray] + batch_eps_step: Union[int, float, np.ndarray] + # Compute batch_eps and batch_eps_step if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index bd3e6d46f7..2dddf5b4ec 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -199,6 +199,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size + batch_eps: Union[int, float, np.ndarray] + batch_eps_step: Union[int, float, np.ndarray] + # Compute batch_eps and batch_eps_step if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: diff --git 
a/art/attacks/extraction/knockoff_nets.py b/art/attacks/extraction/knockoff_nets.py index c1c02eb468..50bcd44751 100644 --- a/art/attacks/extraction/knockoff_nets.py +++ b/art/attacks/extraction/knockoff_nets.py @@ -373,7 +373,7 @@ def _reward_loss(self, y_output: np.ndarray, y_hat: np.ndarray) -> float: return reward - def _reward_all(self, y_output: np.ndarray, y_hat: np.ndarray, n: int) -> np.ndarray: + def _reward_all(self, y_output: np.ndarray, y_hat: np.ndarray, n: int) -> float: """ Compute `all` reward value. @@ -395,7 +395,7 @@ def _reward_all(self, y_output: np.ndarray, y_hat: np.ndarray, n: int) -> np.nda else: reward = [max(min(r, 1), 0) for r in reward] - return np.mean(reward) + return float(np.mean(reward)) def _check_params(self) -> None: if not isinstance(self.batch_size_fit, int) or self.batch_size_fit <= 0: diff --git a/art/attacks/inference/membership_inference/shadow_models.py b/art/attacks/inference/membership_inference/shadow_models.py index bded7129e6..92b37668bd 100644 --- a/art/attacks/inference/membership_inference/shadow_models.py +++ b/art/attacks/inference/membership_inference/shadow_models.py @@ -164,8 +164,8 @@ def _hill_climbing_synthesis( max_iterations: int = 40, max_rejections: int = 3, min_features_randomized: int = 1, - random_record_fn: Callable[[], np.ndarray] = None, - randomize_features_fn: Callable[[np.ndarray, int], np.ndarray] = None, + random_record_fn: Optional[Callable[[], np.ndarray]] = None, + randomize_features_fn: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, ) -> np.ndarray: """ This method implements the hill climbing algorithm from R. Shokri et al. (2017) @@ -247,8 +247,8 @@ def generate_synthetic_shadow_dataset( member_ratio: float = 0.5, min_confidence: float = 0.4, max_retries: int = 6, - random_record_fn: Callable[[], np.ndarray] = None, - randomize_features_fn: Callable[[np.ndarray, int], np.ndarray] = None, + random_record_fn: Optional[Callable[[], np.ndarray]] = None, + randomize_features_fn: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, ) -> Tuple[Tuple[np.ndarray, np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]]: """ Generates a shadow dataset (member and nonmember samples and their corresponding model predictions) by training diff --git a/art/attacks/inference/reconstruction/white_box.py b/art/attacks/inference/reconstruction/white_box.py index 551479fae3..3ed3aab5d9 100644 --- a/art/attacks/inference/reconstruction/white_box.py +++ b/art/attacks/inference/reconstruction/white_box.py @@ -83,8 +83,8 @@ def reconstruct(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) - tol = float("inf") x_0 = x[0, :] - x_guess = None - y_guess = None + x_guess: Optional[np.ndarray] = None + y_guess: int for _y in range(self.estimator.nb_classes): args = (_y, x, y, self._estimator, self.estimator, self.params) @@ -97,6 +97,9 @@ def reconstruct(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) - x_guess = _x y_guess = _y + if x_guess is None: + raise ValueError("Guessed values are None.") + x_reconstructed = np.expand_dims(x_guess, axis=0) y_reconstructed = np.zeros(shape=(1, self.estimator.nb_classes)) y_reconstructed[0, y_guess] = 1 diff --git a/art/attacks/poisoning/adversarial_embedding_attack.py b/art/attacks/poisoning/adversarial_embedding_attack.py index 8139c15db5..7ae533311a 100644 --- a/art/attacks/poisoning/adversarial_embedding_attack.py +++ b/art/attacks/poisoning/adversarial_embedding_attack.py @@ -113,7 +113,7 @@ def __init__( BatchNormalization, LeakyReLU, ) - 
from tensorflow.keras.optimizers import Adam # pylint: disable=E0611 + from tensorflow.keras.optimizers.legacy import Adam # pylint: disable=E0611 opt = Adam(lr=self.learning_rate) @@ -123,7 +123,7 @@ def __init__( from keras.layers import GaussianNoise, Dense, BatchNormalization, LeakyReLU try: - from keras.optimizers import Adam + from keras.optimizers.legacy import Adam opt = Adam(lr=self.learning_rate) except ImportError: diff --git a/art/attacks/poisoning/feature_collision_attack.py b/art/attacks/poisoning/feature_collision_attack.py index 6cbbc866c7..4f2aeeb5cb 100644 --- a/art/attacks/poisoning/feature_collision_attack.py +++ b/art/attacks/poisoning/feature_collision_attack.py @@ -239,7 +239,9 @@ def objective( num_features = base_image.size num_activations = poison_feature_rep.size beta = self.similarity_coeff * (num_activations / num_features) ** 2 - return np.linalg.norm(poison_feature_rep - target_feature_rep) + beta * np.linalg.norm(poison - base_image) + return float( + np.linalg.norm(poison_feature_rep - target_feature_rep) + beta * np.linalg.norm(poison - base_image) + ) def _check_params(self) -> None: if self.learning_rate <= 0: @@ -293,7 +295,11 @@ def tensor_norm(tensor, norm_type: Union[int, float, str] = 2): # pylint: disab :param norm_type: Order of the norm. :return: A tensor with the norm applied. """ - tf_tensor_types = ("tensorflow.python.framework.ops.Tensor", "tensorflow.python.framework.ops.EagerTensor") + tf_tensor_types = ( + "tensorflow.python.framework.ops.Tensor", + "tensorflow.python.framework.ops.EagerTensor", + "tensorflow.python.framework.ops.SymbolicTensor", + ) torch_tensor_types = ("torch.Tensor", "torch.float", "torch.double", "torch.long") mxnet_tensor_types = () supported_types = tf_tensor_types + torch_tensor_types + mxnet_tensor_types diff --git a/art/attacks/poisoning/gradient_matching_attack.py b/art/attacks/poisoning/gradient_matching_attack.py index fa0ffcf85d..3f89c347f2 100644 --- a/art/attacks/poisoning/gradient_matching_attack.py +++ b/art/attacks/poisoning/gradient_matching_attack.py @@ -237,7 +237,7 @@ def get_config(self) -> Dict: """ return {"schedule": self.schedule} - self.optimizer = tf.keras.optimizers.Adam( + self.optimizer = tf.keras.optimizers.legacy.Adam( gradient_transformers=[lambda grads_and_vars: [(tf.sign(g), v) for (g, v) in grads_and_vars]] ) self.lr_schedule = tf.keras.callbacks.LearningRateScheduler(PredefinedLRSchedule(*self.learning_rate_schedule)) @@ -314,11 +314,11 @@ def __init__( self, gradient_matching: GradientMatchingAttack, classifier: PyTorchClassifier, - epsilon: float, - num_poison: int, - len_noise: int, - min_: float, - max_: float, + epsilon, + num_poison, + len_noise, + min_, + max_, ): super().__init__() self.gradient_matching = gradient_matching diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py index ec567a058f..8b2925cf5d 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py @@ -234,7 +234,7 @@ def poison( # pylint: disable=W0221 dist[min_index[0], min_index[1]] = 1e5 loss = np.linalg.norm(feat1 - feat2) ** 2 - losses.update(loss, len(trigger_samples)) + losses.update(float(loss), len(trigger_samples)) # loss gradient computation for KerasClassifier if isinstance(self.estimator, KerasClassifier): diff --git 
a/art/attacks/poisoning/perturbations/audio_perturbations.py b/art/attacks/poisoning/perturbations/audio_perturbations.py index c7ae909380..1c8c723fe9 100644 --- a/art/attacks/poisoning/perturbations/audio_perturbations.py +++ b/art/attacks/poisoning/perturbations/audio_perturbations.py @@ -21,8 +21,10 @@ because loading the audio trigger from disk (librosa.load()) is very slow and should be done only once. """ -import numpy as np +from typing import Optional + import librosa +import numpy as np class CacheTrigger: @@ -89,7 +91,7 @@ def __init__( self, sampling_rate: int = 16000, backdoor_path: str = "../../../utils/data/backdoors/cough_trigger.wav", - duration: float = None, + duration: Optional[float] = None, **kwargs, ): """ diff --git a/art/attacks/poisoning/poisoning_attack_svm.py b/art/attacks/poisoning/poisoning_attack_svm.py index 688768b5d2..220f10da89 100644 --- a/art/attacks/poisoning/poisoning_attack_svm.py +++ b/art/attacks/poisoning/poisoning_attack_svm.py @@ -49,6 +49,7 @@ class PoisoningAttackSVM(PoisoningAttackWhiteBox): "y_train", "x_val", "y_val", + "max_iter", "verbose", ] _estimator_requirements = (ScikitlearnSVC,) @@ -56,13 +57,13 @@ class PoisoningAttackSVM(PoisoningAttackWhiteBox): def __init__( self, classifier: "ScikitlearnSVC", - step: Optional[float] = None, - eps: Optional[float] = None, - x_train: Optional[np.ndarray] = None, - y_train: Optional[np.ndarray] = None, - x_val: Optional[np.ndarray] = None, - y_val: Optional[np.ndarray] = None, - max_iter: int = 100, + step: float, + eps: float, + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + max_iter: int, verbose: bool = True, ) -> None: """ diff --git a/art/attacks/poisoning/sleeper_agent_attack.py b/art/attacks/poisoning/sleeper_agent_attack.py index a6230250e7..b42bddad12 100644 --- a/art/attacks/poisoning/sleeper_agent_attack.py +++ b/art/attacks/poisoning/sleeper_agent_attack.py @@ -101,10 +101,10 @@ def __init__( """ if isinstance(classifier.preprocessing, (StandardisationMeanStdPyTorch, StandardisationMeanStdTensorFlow)): clip_values_normalised = ( - classifier.clip_values - classifier.preprocessing.mean + classifier.clip_values - classifier.preprocessing.mean # type: ignore ) / classifier.preprocessing.std clip_values_normalised = (clip_values_normalised[0], clip_values_normalised[1]) - epsilon_normalised = epsilon * (clip_values_normalised[1] - clip_values_normalised[0]) + epsilon_normalised = epsilon * (clip_values_normalised[1] - clip_values_normalised[0]) # type: ignore patch_normalised = (patch - classifier.preprocessing.mean) / classifier.preprocessing.std else: raise ValueError("classifier.preprocessing not an instance of pytorch/tensorflow") @@ -431,7 +431,7 @@ def _select_poison_indices( classifier.model.trainable = model_trainable else: raise NotImplementedError("SleeperAgentAttack is currently implemented only for PyTorch and TensorFlowV2.") - indices = sorted(range(len(grad_norms)), key=lambda k: grad_norms[k]) + indices = sorted(range(len(grad_norms)), key=lambda k: grad_norms[k]) # type: ignore indices = indices[-num_poison:] return np.array(indices) # this will get only indices for target class diff --git a/art/defences/detector/poison/clustering_analyzer.py b/art/defences/detector/poison/clustering_analyzer.py index 6690197438..27b6855d94 100644 --- a/art/defences/detector/poison/clustering_analyzer.py +++ b/art/defences/detector/poison/clustering_analyzer.py @@ -235,7 +235,7 @@ def analyze_by_relative_size( for c_id in clean_clusters[0]: 
summary_poison_clusters[i][c_id] = 0 - assigned_clean = self.assign_class(clusters, clean_clusters, poison_clusters) + assigned_clean = self.assign_class(clusters, clean_clusters[0], poison_clusters[0]) all_assigned_clean.append(assigned_clean) # Generate report for this class: @@ -251,7 +251,7 @@ def analyze_by_relative_size( report["Class_" + str(i)] = report_class report["suspicious_clusters"] = report["suspicious_clusters"] + np.sum(summary_poison_clusters).item() - return np.asarray(all_assigned_clean), summary_poison_clusters, report + return np.asarray(all_assigned_clean, dtype=object), summary_poison_clusters, report def analyze_by_silhouette_score( self, @@ -328,7 +328,7 @@ def analyze_by_silhouette_score( logger.info("computed silhouette score: %s", silhouette_avg) dict_i.update(suspicious=True) else: - poison_clusters = [[]] + poison_clusters = (np.array([[]]),) clean_clusters = np.where(percentages >= 0) dict_i.update(suspicious=False) else: @@ -342,8 +342,8 @@ def analyze_by_silhouette_score( for c_id in clean_clusters[0]: summary_poison_clusters[i][c_id] = 0 - assigned_clean = self.assign_class(clusters, clean_clusters, poison_clusters) + assigned_clean = self.assign_class(clusters, clean_clusters[0], poison_clusters[0]) all_assigned_clean.append(assigned_clean) report.update(report_class) - return np.asarray(all_assigned_clean), summary_poison_clusters, report + return np.asarray(all_assigned_clean, dtype=object), summary_poison_clusters, report diff --git a/art/defences/detector/poison/ground_truth_evaluator.py b/art/defences/detector/poison/ground_truth_evaluator.py index 077050374e..6baaf7331b 100644 --- a/art/defences/detector/poison/ground_truth_evaluator.py +++ b/art/defences/detector/poison/ground_truth_evaluator.py @@ -125,7 +125,7 @@ def get_confusion_matrix(self, values: np.ndarray) -> dict: ) if (true_positive + false_negative) == 0: dic_tp = dict( - rate="N/A", + rate=-1, numerator=true_positive, denominator=(true_positive + false_negative), ) @@ -137,7 +137,7 @@ def get_confusion_matrix(self, values: np.ndarray) -> dict: ) if (false_positive + true_negative) == 0: dic_tn = dict( - rate="N/A", + rate=-1, numerator=true_negative, denominator=(false_positive + true_negative), ) @@ -149,7 +149,7 @@ def get_confusion_matrix(self, values: np.ndarray) -> dict: ) if (false_positive + true_negative) == 0: dic_fp = dict( - rate="N/A", + rate=-1, numerator=false_positive, denominator=(false_positive + true_negative), ) @@ -161,7 +161,7 @@ def get_confusion_matrix(self, values: np.ndarray) -> dict: ) if (true_positive + false_negative) == 0: dic_fn = dict( - rate="N/A", + rate=-1, numerator=false_negative, denominator=(true_positive + false_negative), ) diff --git a/art/defences/detector/poison/roni.py b/art/defences/detector/poison/roni.py index 304ad6446e..52372d0d53 100644 --- a/art/defences/detector/poison/roni.py +++ b/art/defences/detector/poison/roni.py @@ -178,11 +178,11 @@ def is_suspicious(self, before_classifier: "CLASSIFIER_TYPE", perf_shift: float) """ if self.calibrated: median, std_dev = self.get_calibration_info(before_classifier) - return perf_shift < median - 3 * std_dev + return bool(perf_shift < median - 3 * std_dev) - return perf_shift < -self.eps + return bool(perf_shift < -self.eps) - def get_calibration_info(self, before_classifier: "CLASSIFIER_TYPE") -> Tuple[np.ndarray, np.ndarray]: + def get_calibration_info(self, before_classifier: "CLASSIFIER_TYPE") -> Tuple[float, float]: """ Calculate the median and standard deviation of the accuracy shifts 
caused by the calibration set. @@ -205,7 +205,7 @@ def get_calibration_info(self, before_classifier: "CLASSIFIER_TYPE") -> Tuple[np ) ) - return np.median(accs), np.std(accs) + return float(np.median(accs)), float(np.std(accs)) def _check_params(self) -> None: if len(self.x_train) != len(self.y_train): diff --git a/art/defences/trainer/adversarial_trainer.py b/art/defences/trainer/adversarial_trainer.py index 5d1369981e..69aaae252d 100644 --- a/art/defences/trainer/adversarial_trainer.py +++ b/art/defences/trainer/adversarial_trainer.py @@ -111,11 +111,10 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg the target classifier. """ logger.info("Performing adversarial training using %i attacks.", len(self.attacks)) - size = generator.size - if size is None: + if generator.size is None: raise ValueError("Generator size is required and cannot be None.") batch_size = generator.batch_size - nb_batches = int(np.ceil(size / batch_size)) # type: ignore + nb_batches = int(np.ceil(generator.size / batch_size)) # type: ignore ind = np.arange(generator.size) attack_id = 0 @@ -173,7 +172,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg # Otherwise, use precomputed adversarial samples else: - batch_size_current = min(batch_size, size - batch_id * batch_size) + batch_size_current = min(batch_size, generator.size - batch_id * batch_size) nb_adv = int(np.ceil(self.ratio * batch_size_current)) if self.ratio < 1: adv_ids = np.random.choice(batch_size_current, size=nb_adv, replace=False) @@ -183,7 +182,9 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg x_adv = self._precomputed_adv_samples[attack_id] if x_adv is not None: - x_adv = x_adv[ind[batch_id * batch_size : min((batch_id + 1) * batch_size, size)]][adv_ids] + x_adv = x_adv[ind[batch_id * batch_size : min((batch_id + 1) * batch_size, generator.size)]][ + adv_ids + ] x_batch[adv_ids] = x_adv # Fit batch diff --git a/art/defences/trainer/adversarial_trainer_awp_pytorch.py b/art/defences/trainer/adversarial_trainer_awp_pytorch.py index 9a59ea0be6..1b95f0c8bb 100644 --- a/art/defences/trainer/adversarial_trainer_awp_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_awp_pytorch.py @@ -89,7 +89,7 @@ def fit( validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, batch_size: int = 128, nb_epochs: int = 20, - scheduler: "torch.optim.lr_scheduler._LRScheduler" = None, + scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, **kwargs, ): # pylint: disable=W0221 """ @@ -198,7 +198,7 @@ def fit_generator( generator: DataGenerator, validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, nb_epochs: int = 20, - scheduler: "torch.optim.lr_scheduler._LRScheduler" = None, + scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, **kwargs, ): # pylint: disable=W0221 """ diff --git a/art/defences/trainer/adversarial_trainer_trades_pytorch.py b/art/defences/trainer/adversarial_trainer_trades_pytorch.py index c965635419..3763d571e8 100644 --- a/art/defences/trainer/adversarial_trainer_trades_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_trades_pytorch.py @@ -69,7 +69,7 @@ def fit( validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, batch_size: int = 128, nb_epochs: int = 20, - scheduler: "torch.optim.lr_scheduler._LRScheduler" = None, + scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, **kwargs ): # pylint: disable=W0221 """ @@ -158,7 +158,7 @@ def fit_generator( self, 
generator: DataGenerator, nb_epochs: int = 20, - scheduler: "torch.optim.lr_scheduler._LRScheduler" = None, + scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, **kwargs ): # pylint: disable=W0221 """ diff --git a/art/defences/trainer/certified_adversarial_trainer_pytorch.py b/art/defences/trainer/certified_adversarial_trainer_pytorch.py index 099b9eccfa..ab3bac9e36 100644 --- a/art/defences/trainer/certified_adversarial_trainer_pytorch.py +++ b/art/defences/trainer/certified_adversarial_trainer_pytorch.py @@ -225,7 +225,7 @@ def fit( # pylint: disable=W0221 y_preprocessed = self.classifier.reduce_labels(y_preprocessed) num_batch = int(np.ceil(len(x_preprocessed) / float(self.pgd_params["batch_size"]))) - ind = np.arange(len(x_preprocessed)) + ind = np.arange(len(x_preprocessed)).tolist() x_cert = np.copy(x_preprocessed) y_cert = np.copy(y_preprocessed) @@ -287,7 +287,8 @@ def fit( # pylint: disable=W0221 if certification_loss == "max_logit_loss": certified_loss += self.classifier.max_logit_loss( - prediction=torch.cat((bias, eps)), target=np.expand_dims(label, axis=0) + prediction=torch.cat((bias, eps)), + target=torch.from_numpy(np.expand_dims(label, axis=0)).to(self.classifier.device), ) elif certification_loss == "interval_loss_cce": certified_loss += self.classifier.interval_loss_cce( diff --git a/art/defences/trainer/ibp_certified_trainer_pytorch.py b/art/defences/trainer/ibp_certified_trainer_pytorch.py index 2d5e6abb0e..b25ffdbdfb 100644 --- a/art/defences/trainer/ibp_certified_trainer_pytorch.py +++ b/art/defences/trainer/ibp_certified_trainer_pytorch.py @@ -288,7 +288,7 @@ def fit( # pylint: disable=W0221 y_preprocessed = self.classifier.reduce_labels(y_preprocessed) num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) - ind = np.arange(len(x_preprocessed)) + ind = np.arange(len(x_preprocessed)).tolist() x_cert = np.copy(x_preprocessed) y_cert = np.copy(y_preprocessed) diff --git a/art/defences/transformer/poisoning/strip.py b/art/defences/transformer/poisoning/strip.py index 6bc7b04ed7..dcd1463c1d 100644 --- a/art/defences/transformer/poisoning/strip.py +++ b/art/defences/transformer/poisoning/strip.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TypeVar, TYPE_CHECKING +from typing import Optional, TYPE_CHECKING import numpy as np @@ -33,8 +33,6 @@ if TYPE_CHECKING: from art.utils import CLASSIFIER_TYPE - ClassifierWithStrip = TypeVar("ClassifierWithStrip", CLASSIFIER_TYPE, STRIPMixin) - logger = logging.getLogger(__name__) @@ -63,7 +61,7 @@ def __call__( # type: ignore self, num_samples: int = 20, false_acceptance_rate: float = 0.01, - ) -> "ClassifierWithStrip": + ) -> "CLASSIFIER_TYPE": """ Create a STRIP defense diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index cd3e53243b..be227c73d2 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -488,7 +488,7 @@ def fit( # pylint: disable=W0221 num_batch = len(x_preprocessed) / float(batch_size) num_batch = int(np.floor(num_batch)) if drop_last else int(np.ceil(num_batch)) - ind = np.arange(len(x_preprocessed)) + ind = np.arange(len(x_preprocessed)).tolist() # Start training for _ in tqdm(range(nb_epochs)): diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py 
b/art/estimators/certification/derandomized_smoothing/tensorflow.py index 6cc958acb3..6961edf483 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -67,7 +67,7 @@ def __init__( logits: bool, input_shape: Tuple[int, ...], loss_object: Optional["tf.Tensor"] = None, - optimizer: Optional["tf.keras.optimizers.Optimizer"] = None, + optimizer: Optional["tf.keras.optimizers.legacy.Optimizer"] = None, train_step: Optional[Callable] = None, channels_first: bool = False, clip_values: Optional["CLIP_VALUES_TYPE"] = None, diff --git a/art/estimators/certification/randomized_smoothing/numpy.py b/art/estimators/certification/randomized_smoothing/numpy.py index a20d201674..bb27876c41 100644 --- a/art/estimators/certification/randomized_smoothing/numpy.py +++ b/art/estimators/certification/randomized_smoothing/numpy.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import List, Union, TYPE_CHECKING, Tuple +from typing import List, Optional, Union, TYPE_CHECKING, Tuple import warnings import numpy as np @@ -141,7 +141,11 @@ def loss_gradient( # pylint: disable=W0221 return self.classifier.loss_gradient(x=x, y=y, training_mode=training_mode, **kwargs) # type: ignore def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Union[int, List[int]] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. diff --git a/art/estimators/certification/randomized_smoothing/pytorch.py b/art/estimators/certification/randomized_smoothing/pytorch.py index a2f8fd44f7..ab2b8401a1 100644 --- a/art/estimators/certification/randomized_smoothing/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/pytorch.py @@ -302,7 +302,11 @@ def loss_gradient( # type: ignore return gradients def class_gradient( - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. diff --git a/art/estimators/certification/randomized_smoothing/tensorflow.py b/art/estimators/certification/randomized_smoothing/tensorflow.py index ef8e5720c0..543c197f9d 100644 --- a/art/estimators/certification/randomized_smoothing/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/tensorflow.py @@ -272,7 +272,11 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals return gradients def class_gradient( - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. 
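Two typing patterns repeat from here through the rest of this diff: defaults of `None` gain an explicit `Optional[...]` (strict mypy with no-implicit-optional rejects forms like `scheduler: "..._LRScheduler" = None`), and the `class_gradient` signatures are widened to accept `np.ndarray` labels alongside `int` and `List[int]`. A minimal sketch of the signature the classifiers converge on, assuming that reading of the change; the body is illustrative, not ART's implementation:

from typing import List, Optional, Union

import numpy as np

def class_gradient(label: Optional[Union[int, List[int], np.ndarray]] = None) -> None:
    # None selects all classes; an int selects one; a list or ndarray selects several.
    if label is None:
        print("gradients for every class")
    elif isinstance(label, (int, np.integer)):
        print(f"gradient for class {int(label)}")
    else:
        print(f"gradients for classes {np.asarray(label).ravel().tolist()}")

class_gradient()                     # all classes
class_gradient(3)                    # one class
class_gradient([0, 2])               # a list of classes
class_gradient(np.array([1, 1, 0]))  # one label per sample, as an ndarray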
diff --git a/art/estimators/classification/GPy.py b/art/estimators/classification/GPy.py index 7fb0f8e22f..458bc961a0 100644 --- a/art/estimators/classification/GPy.py +++ b/art/estimators/classification/GPy.py @@ -92,7 +92,7 @@ def input_shape(self) -> Tuple[int, ...]: # pylint: disable=W0221 def class_gradient( # type: ignore - self, x: np.ndarray, label: Union[int, List[int], None] = None, eps: float = 0.0001, **kwargs + self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, eps: float = 0.0001, **kwargs ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/estimators/classification/classifier.py b/art/estimators/classification/classifier.py index b98c9e06a0..191f4a784a 100644 --- a/art/estimators/classification/classifier.py +++ b/art/estimators/classification/classifier.py @@ -128,7 +128,9 @@ class `Classifier`. """ @abstractmethod - def class_gradient(self, x: np.ndarray, label: Optional[Union[int, List[int]]] = None, **kwargs) -> np.ndarray: + def class_gradient( + self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs + ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/estimators/classification/deep_partition_ensemble.py b/art/estimators/classification/deep_partition_ensemble.py index 82dbb9cf40..e309db2bac 100644 --- a/art/estimators/classification/deep_partition_ensemble.py +++ b/art/estimators/classification/deep_partition_ensemble.py @@ -160,7 +160,7 @@ def fit( # pylint: disable=W0221 y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, - train_dict: Dict = None, + train_dict: Optional[Dict] = None, **kwargs ) -> None: """ diff --git a/art/estimators/classification/detector_classifier.py b/art/estimators/classification/detector_classifier.py index 62d4d9a339..e3733352bb 100644 --- a/art/estimators/classification/detector_classifier.py +++ b/art/estimators/classification/detector_classifier.py @@ -142,7 +142,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg def class_gradient( # pylint: disable=W0221 self, x: np.ndarray, - label: Union[int, List[int], np.ndarray, None] = None, + label: Optional[Union[int, List[int], np.ndarray]] = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -216,7 +216,7 @@ def class_gradient( # pylint: disable=W0221 # First compute the classifier gradients for classifier_idx if classifier_idx: combined_grads[classifier_idx] = self.classifier.class_gradient( - x=x[classifier_idx], label=label[classifier_idx], training_mode=training_mode, **kwargs + x=x[classifier_idx], label=label[classifier_idx].tolist(), training_mode=training_mode, **kwargs ) # Then compute the detector gradients for detector_idx diff --git a/art/estimators/classification/ensemble.py b/art/estimators/classification/ensemble.py index 96007002cf..c02508330b 100644 --- a/art/estimators/classification/ensemble.py +++ b/art/estimators/classification/ensemble.py @@ -249,7 +249,7 @@ def get_activations( def class_gradient( # pylint: disable=W0221 self, x: np.ndarray, - label: Union[int, List[int], None] = None, + label: Optional[Union[int, List[int], np.ndarray]] = None, training_mode: bool = False, raw: bool = False, **kwargs, diff --git a/art/estimators/classification/keras.py b/art/estimators/classification/keras.py index 3ea53848e1..21bb9afcee 100644 --- a/art/estimators/classification/keras.py +++ b/art/estimators/classification/keras.py @@ -459,7 +459,11 @@ def loss_gradient( # pylint: disable=W0221 return gradients 
def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Optional[Union[int, List[int]]] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -757,7 +761,7 @@ def _init_class_gradients(self, label: Optional[Union[int, List[int], np.ndarray if isinstance(label, int): unique_labels = [label] else: - unique_labels = np.unique(label) + unique_labels = np.unique(label).tolist() logger.debug("Computing class gradients for classes %s.", str(unique_labels)) if not hasattr(self, "_class_gradients_idx"): diff --git a/art/estimators/classification/mxnet.py b/art/estimators/classification/mxnet.py index 59afc18622..b8481d68b9 100644 --- a/art/estimators/classification/mxnet.py +++ b/art/estimators/classification/mxnet.py @@ -300,7 +300,11 @@ def predict( # pylint: disable=W0221 return predictions def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py index 385ad7c58e..7636d3d393 100644 --- a/art/estimators/classification/pytorch.py +++ b/art/estimators/classification/pytorch.py @@ -561,7 +561,11 @@ def weight_reset(module): self.model.apply(weight_reset) def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -598,6 +602,8 @@ def class_gradient( # pylint: disable=W0221 self.set_batchnorm(train=False) self.set_dropout(train=False) + if isinstance(label, list): + label = np.array(label) if not ( (label is None) or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes)) diff --git a/art/estimators/classification/query_efficient_bb.py b/art/estimators/classification/query_efficient_bb.py index 5c05115de1..748617e3cb 100644 --- a/art/estimators/classification/query_efficient_bb.py +++ b/art/estimators/classification/query_efficient_bb.py @@ -118,7 +118,9 @@ def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> Tuple[np. ) return minus, plus - def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray: + def class_gradient( + self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs + ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/estimators/classification/scikitlearn.py b/art/estimators/classification/scikitlearn.py index 9ae4bdf928..43aeee07b8 100644 --- a/art/estimators/classification/scikitlearn.py +++ b/art/estimators/classification/scikitlearn.py @@ -785,7 +785,9 @@ def __init__( preprocessing=preprocessing, ) - def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray: + def class_gradient( + self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs + ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. 
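Several hunks in this diff convert index arrays to Python lists (`np.arange(...).tolist()`, `np.unique(label).tolist()`, `ranked_indices[:num_top].tolist()`). A small sketch of the shuffled mini-batch loop this pattern serves, under the assumption that the conversion exists so the indices type-check as `List[int]` rather than an ndarray of `np.int64`; the data and loop below are illustrative:

import numpy as np

x = np.random.rand(10, 4)          # stand-in training data
batch_size = 3
num_batch = int(np.ceil(len(x) / float(batch_size)))
ind = np.arange(len(x)).tolist()   # List[int] instead of np.ndarray

for _ in range(2):                 # epochs
    np.random.shuffle(ind)         # shuffles the list in place
    for m in range(num_batch):
        batch = x[ind[m * batch_size : (m + 1) * batch_size]]
        # ... fit on `batch` ...

NumPy fancy indexing accepts the plain-int slice of `ind` unchanged, so behavior is identical; only the static types differ.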
@@ -1025,7 +1027,9 @@ def __init__( ) self._kernel = self._kernel_func() - def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray: + def class_gradient( + self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs + ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py index 8dca946cbd..c71445afb8 100644 --- a/art/estimators/classification/tensorflow.py +++ b/art/estimators/classification/tensorflow.py @@ -294,7 +294,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in y_preprocessed = np.argmax(y_preprocessed, axis=1) num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) - ind = np.arange(len(x_preprocessed)) + ind = np.arange(len(x_preprocessed)).tolist() # Start training for _ in range(nb_epochs): @@ -359,7 +359,11 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg super().fit_generator(generator, nb_epochs=nb_epochs, **kwargs) def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -1072,7 +1076,11 @@ def train_step(model, images, labels): super().fit_generator(generator, nb_epochs=nb_epochs) def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/estimators/object_detection/pytorch_detection_transformer.py b/art/estimators/object_detection/pytorch_detection_transformer.py index 9f1389398e..d7cb0f0398 100644 --- a/art/estimators/object_detection/pytorch_detection_transformer.py +++ b/art/estimators/object_detection/pytorch_detection_transformer.py @@ -51,7 +51,7 @@ class PyTorchDetectionTransformer(ObjectDetectorMixin, PyTorchEstimator): def __init__( self, - model: "torch.nn.Module" = None, + model: Optional["torch.nn.Module"] = None, input_shape: Tuple[int, ...] 
= (3, 800, 800), clip_values: Optional["CLIP_VALUES_TYPE"] = None, channels_first: Optional[bool] = True, @@ -289,7 +289,7 @@ def _get_losses( y_tensor.append(y_t) elif y is not None and isinstance(y[0]["boxes"], np.ndarray): y_tensor = [] - for y_i in y_preprocessed: + for y_i in y: y_t = { "boxes": torch.from_numpy(y_i["boxes"]).type(torch.float).to(self.device), "labels": torch.from_numpy(y_i["labels"]).type(torch.int64).to(self.device), diff --git a/art/estimators/object_tracking/pytorch_goturn.py b/art/estimators/object_tracking/pytorch_goturn.py index 6c434e0197..5adf2d33cd 100644 --- a/art/estimators/object_tracking/pytorch_goturn.py +++ b/art/estimators/object_tracking/pytorch_goturn.py @@ -353,8 +353,8 @@ def _preprocess(self, img: "torch.Tensor") -> "torch.Tensor": mean_np = self.preprocessing.mean std_np = self.preprocessing.std else: - mean_np = np.ones((3, 1, 1)) - std_np = np.ones((3, 1, 1)) + mean_np = np.ones(shape=(3, 1, 1), dtype=float) + std_np = np.ones(shape=(3, 1, 1), dtype=float) mean = torch.from_numpy(mean_np).reshape((3, 1, 1)) std = torch.from_numpy(std_np).reshape((3, 1, 1)) img = img.permute(2, 0, 1) diff --git a/art/estimators/poison_mitigation/neural_cleanse/keras.py b/art/estimators/poison_mitigation/neural_cleanse/keras.py index 8a58a10ecc..5c3146e647 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/keras.py +++ b/art/estimators/poison_mitigation/neural_cleanse/keras.py @@ -182,7 +182,7 @@ def __init__( self.loss_combined = self.loss_ce + self.loss_reg * self.cost_tensor try: - from keras.optimizers import Adam + from keras.optimizers.legacy import Adam self.opt = Adam(lr=self.learning_rate, beta_1=0.5, beta_2=0.9) except ImportError: @@ -228,7 +228,7 @@ def generate_backdoor( :return: A tuple of the pattern and mask for the model. """ import keras.backend as K - from keras_preprocessing.image import ImageDataGenerator + from keras.preprocessing.image import ImageDataGenerator self.reset() datagen = ImageDataGenerator() @@ -391,7 +391,11 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals return self.loss_gradient(x=x, y=y, training_mode=training_mode, **kwargs) def class_gradient( - self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs + self, + x: np.ndarray, + label: Optional[Union[int, List[int], np.ndarray]] = None, + training_mode: bool = False, + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. 
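The optimizer import rewritten just above (`keras.optimizers` to `keras.optimizers.legacy`) recurs through the remaining test and utility hunks. Keras 2.11 replaced its optimizer implementations, and the new classes drop TF1-era argument names such as `lr=` and `decay=`, so code built on those names targets the legacy namespace. A version-tolerant sketch of the import pattern this PR keeps using, assuming the legacy module is available from Keras 2.11 onward:

try:
    from keras.optimizers.legacy import Adam  # Keras >= 2.11
except ImportError:
    from keras.optimizers import Adam  # older Keras exposes the same class here

# The legacy class still accepts the old argument names used in this codebase.
optimizer = Adam(lr=0.001, beta_1=0.5, beta_2=0.9)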
diff --git a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py index 120e869b79..62c6f82c08 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py +++ b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py @@ -200,7 +200,7 @@ def mitigate(self, x_val: np.ndarray, y_val: np.ndarray, mitigation_types: List[ # get indices of top 1% of ranked neurons num_top = int(np.ceil(len(ranked_indices) * 0.01)) - self.top_indices = ranked_indices[:num_top] + self.top_indices = ranked_indices[:num_top].tolist() # measure average activation for clean images and backdoor images avg_clean_activation = np.average(clean_activations[:, self.top_indices], axis=0) diff --git a/art/estimators/regression/blackbox.py b/art/estimators/regression/blackbox.py index cdd3cc2844..4339692b4f 100644 --- a/art/estimators/regression/blackbox.py +++ b/art/estimators/regression/blackbox.py @@ -49,7 +49,7 @@ def __init__( self, predict_fn: Union[Callable, Tuple[np.ndarray, np.ndarray]], input_shape: Tuple[int, ...], - loss_fn: Callable = None, + loss_fn: Optional[Callable] = None, clip_values: Optional["CLIP_VALUES_TYPE"] = None, preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index 4fde400495..0cdb2a134a 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -146,7 +146,10 @@ def __init__( # Check DeepSpeech version if str(DeepSpeech.__base__) == "<class 'torch.nn.modules.module.Module'>": self._version = 2 - elif str(DeepSpeech.__base__) == "<class 'pytorch_lightning.core.lightning.LightningModule'>": + elif str(DeepSpeech.__base__) in [ + "<class 'pytorch_lightning.core.lightning.LightningModule'>", + "<class 'pytorch_lightning.core.module.LightningModule'>", + ]: self._version = 3 else: raise NotImplementedError("Only DeepSpeech version 2 and DeepSpeech version 3 are currently supported.") @@ -381,7 +384,7 @@ def predict( # Call to DeepSpeech model for prediction with torch.no_grad(): - outputs, output_sizes = self._model( + outputs, output_sizes, _ = self._model( inputs[begin:end].to(self._device), input_sizes[begin:end].to(self._device) ) @@ -455,7 +458,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: input_sizes = input_rates.mul_(inputs.size()[-1]).int() # Call to DeepSpeech model for prediction - outputs, output_sizes = self._model(inputs.to(self._device), input_sizes.to(self._device)) + outputs, output_sizes, _ = self._model(inputs.to(self._device), input_sizes.to(self._device)) outputs = outputs.transpose(0, 1) if self._version == 2: @@ -534,7 +537,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in # Train with batch processing num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) - ind = np.arange(len(x_preprocessed)) + ind = np.arange(len(x_preprocessed)).tolist() # Start training for _ in range(nb_epochs): @@ -566,7 +569,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in self.optimizer.zero_grad() # Call to DeepSpeech model for prediction - outputs, output_sizes = self._model(inputs.to(self._device), input_sizes.to(self._device)) + outputs, output_sizes, _ = self._model(inputs.to(self._device), input_sizes.to(self._device)) outputs = outputs.transpose(0, 1) if self._version == 2: @@ -625,7 +628,7 @@ def compute_loss_and_decoded_output( input_sizes =
input_rates.mul_(inputs.size()[-1]).int() # Call to DeepSpeech model for prediction - outputs, output_sizes = self.model(inputs.to(self.device), input_sizes.to(self.device)) + outputs, output_sizes, _ = self.model(inputs.to(self.device), input_sizes.to(self.device)) outputs_ = outputs.transpose(0, 1) if self._version == 2: diff --git a/art/experimental/estimators/classification/jax.py b/art/experimental/estimators/classification/jax.py index dfabdef184..4c7310307c 100644 --- a/art/experimental/estimators/classification/jax.py +++ b/art/experimental/estimators/classification/jax.py @@ -192,7 +192,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) - ind = np.arange(len(x_preprocessed)) + ind = np.arange(len(x_preprocessed)).tolist() # Start training for _ in range(nb_epochs): @@ -236,7 +236,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg raise NotImplementedError def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs + self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. diff --git a/art/metrics/metrics.py b/art/metrics/metrics.py index 473c4bfae1..97c84afa50 100644 --- a/art/metrics/metrics.py +++ b/art/metrics/metrics.py @@ -90,10 +90,10 @@ def get_crafter(classifier: "CLASSIFIER_TYPE", attack: str, params: Optional[Dic def adversarial_accuracy( classifier: "CLASSIFIER_TYPE", x: np.ndarray, - y: np.ndarray = None, - attack_name: str = None, + y: Optional[np.ndarray] = None, + attack_name: Optional[str] = None, attack_params: Optional[Dict[str, Any]] = None, - attack_crafter: EvasionAttack = None, + attack_crafter: Optional[EvasionAttack] = None, ) -> float: """ Compute the adversarial accuracy of a classifier object over the sample `x` for a given adversarial crafting diff --git a/art/metrics/privacy/membership_leakage.py b/art/metrics/privacy/membership_leakage.py index dd4e0d03bf..1a4defc427 100644 --- a/art/metrics/privacy/membership_leakage.py +++ b/art/metrics/privacy/membership_leakage.py @@ -131,7 +131,7 @@ def PDTP( # pylint: disable=C0103 # get max value max_value: float = max(ratio_1.max(), ratio_2.max()) elif comparison_type == ComparisonType.DIFFERENCE: - max_value = np.max(abs(pred_bin - alt_pred_bin)) + max_value = float(np.max(abs(pred_bin - alt_pred_bin))) else: raise ValueError("Unsupported comparison type.") iter_results.append(max_value) diff --git a/art/utils.py b/art/utils.py index b409e8d15a..7c4ff28348 100644 --- a/art/utils.py +++ b/art/utils.py @@ -435,10 +435,10 @@ def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> n # The vector of reductions delta_vec = np.transpose(np.array([delta] * (n - j - 1))) # The sub-vectors: a_sorted[:, (j+1):] - a_sub = a_sorted[:, (j + 1) :] + a_sub = a_sorted[:, int(j + 1) :] # After reduction by delta_vec a_after = a_sub - delta_vec - after_vec[:, (j + 1) :] = a_after + after_vec[:, int(j + 1) :] = a_after proj += act_multiplier * (after_vec - proj) active = active * ind_set if sum(active) == 0: @@ -983,7 +983,7 @@ def compute_success_array( x_adv: np.ndarray, targeted: bool = False, batch_size: int = 1, -) -> float: +) -> np.ndarray: """ Compute the success rate of an attack based on clean samples, adversarial samples and targets or 
correct labels. @@ -1304,12 +1304,12 @@ def load_stl() -> DATASET_TYPE: x_test = x_test.transpose((0, 2, 3, 1)) with open(os.path.join(path, "train_y.bin"), "rb") as f_numpy: - y_train = np.fromfile(f_numpy, dtype=np.uint8) - y_train -= 1 + y_train_uint = np.fromfile(f_numpy, dtype=np.uint8) + y_train = y_train_uint - 1 with open(os.path.join(path, "test_y.bin"), "rb") as f_numpy: - y_test = np.fromfile(f_numpy, dtype=np.uint8) - y_test -= 1 + y_test_uint = np.fromfile(f_numpy, dtype=np.uint8) + y_test = y_test_uint - 1 x_train, y_train = preprocess(x_train, y_train) x_test, y_test = preprocess(x_test, y_test) diff --git a/readthedocs.yml b/readthedocs.yml index a24c500806..e7467d2df7 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -1,10 +1,16 @@ version: 2 + +build: + os: ubuntu-20.04 + tools: + python: "3.9" + +python: + install: + - method: pip + path: . + extra_requirements: + - docs + sphinx: configuration: docs/conf.py -python: - version: 3.6 - install: - - method: pip - path: . - extra_requirements: - - docs diff --git a/requirements_test.txt b/requirements_test.txt index 2d3dbc64b4..54337efac8 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -5,25 +5,25 @@ scipy==1.10.1 matplotlib==3.7.1 scikit-learn>=0.22.2,<1.2.0 six==1.16.0 -Pillow==9.5.0 -tqdm==4.65.0 +Pillow==10.1.0 +tqdm==4.66.1 statsmodels==0.13.5 pydub==0.25.1 resampy==0.4.2 ffmpeg-python==0.2.0 cma==3.3.0 -pandas==2.0.1 +pandas==2.1.4 librosa==0.10.0.post2 numba~=0.58.1 opencv-python sortedcontainers==2.4.0 -h5py==3.8.0 +h5py==3.10.0 multiprocess>=0.70.12 # frameworks -tensorflow==2.10.1 -keras==2.10.0 +tensorflow==2.14.0 +keras==2.14.0 tensorflow-addons>=0.13.0 # using mxnet-native for reproducible test results on CI machines without Intel Architecture Processors, but mxnet is fully supported by ART @@ -31,22 +31,22 @@ mxnet-native==1.8.0.post0 # PyTorch --find-links https://download.pytorch.org/whl/cpu/torch_stable.html -torch==1.13.1 -torchaudio==0.13.1+cpu -torchvision==0.14.1+cpu +torch==2.1.2 +torchaudio==2.1.2 +torchvision==0.16.2+cpu # PyTorch image transformers timm==0.9.2 catboost==1.1.1 GPy==1.10.0 -lightgbm==3.3.5 -xgboost==1.7.5 +lightgbm==4.1.0 +xgboost==2.0.2 kornia~=0.6.12 -tensorboardX==2.6 +tensorboardX==2.6.2.2 lief==0.12.3 -jax[cpu]==0.4.8 +jax[cpu]==0.4.23 # Lingvo ASR dependencies # supported versions: (lingvo==0.6.4 with tensorflow-gpu==2.1.0) @@ -55,7 +55,7 @@ jax[cpu]==0.4.8 # lingvo==0.6.4 # other -pytest~=7.3.1 +pytest~=7.4.3 pytest-flake8~=1.1.1 flake8~=4.0.1 pytest-mock~=3.10.0 diff --git a/tests/attacks/evasion/test_auto_attack.py b/tests/attacks/evasion/test_auto_attack.py index 9dc71a0895..52e76274a6 100644 --- a/tests/attacks/evasion/test_auto_attack.py +++ b/tests/attacks/evasion/test_auto_attack.py @@ -193,11 +193,18 @@ def test_classifier_type_check_fail(art_warning): art_warning(e) -@pytest.mark.skip_framework("tensorflow1", "tensorflow2v1", "keras", "non_dl_frameworks", "mxnet", "kerastf") -def test_generate_parallel(art_warning, fix_get_mnist_subset, image_dl_estimator): +@pytest.mark.skip_framework( + "tensorflow1", "tensorflow2v1", "tensorflow2", "keras", "non_dl_frameworks", "mxnet", "kerastf" +) +def test_generate_parallel(art_warning, fix_get_mnist_subset, image_dl_estimator, framework): try: classifier, _ = image_dl_estimator(from_logits=True) + if framework == "tensorflow2": + import tensorflow as tf + + classifier.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01)) + norm = np.inf eps = 0.3 eps_step = 0.1 @@ -282,10 +289,11 @@ def 
test_generate_parallel(art_warning, fix_get_mnist_subset, image_dl_estimator ) x_train_mnist_adv = attack.generate(x=x_train_mnist, y=y_train_mnist) + x_train_mnist_adv_nop = attack_noparallel.generate(x=x_train_mnist, y=y_train_mnist) - assert np.mean(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(0.0182, abs=0.105) - assert np.max(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(0.3, abs=0.05) + assert np.mean(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(expected=0.0182, abs=0.105) + assert np.max(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(expected=0.3, abs=0.05) noparallel_perturbation = np.linalg.norm(x_train_mnist[[2]] - x_train_mnist_adv_nop[[2]]) parallel_perturbation = np.linalg.norm(x_train_mnist[[2]] - x_train_mnist_adv[[2]]) @@ -307,7 +315,8 @@ def test_generate_parallel(art_warning, fix_get_mnist_subset, image_dl_estimator x_train_mnist_adv = attack.generate(x=x_train_mnist, y=y_train_mnist) - assert np.mean(x_train_mnist_adv - x_train_mnist) == pytest.approx(0.0, abs=0.0075) - assert np.max(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(eps, abs=0.005) + assert np.mean(x_train_mnist_adv - x_train_mnist) == pytest.approx(expected=0.0, abs=0.0075) + assert np.max(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(expected=eps, abs=0.005) + except ARTTestException as e: art_warning(e) diff --git a/tests/attacks/inference/attribute_inference/test_true_label_baseline.py b/tests/attacks/inference/attribute_inference/test_true_label_baseline.py index 67d84f8116..05de0df14a 100644 --- a/tests/attacks/inference/attribute_inference/test_true_label_baseline.py +++ b/tests/attacks/inference/attribute_inference/test_true_label_baseline.py @@ -231,7 +231,7 @@ def transform_feature(x): ) expected_train_acc = {"nn": 0.81, "rf": 0.98, "gb": 0.98, "lr": 0.81, "dt": 0.98, "knn": 0.85, "svm": 0.81} - expected_test_acc = {"nn": 0.88, "rf": 0.83, "gb": 0.75, "lr": 0.88, "dt": 0.8, "knn": 0.82, "svm": 0.88} + expected_test_acc = {"nn": 0.88, "rf": 0.81, "gb": 0.75, "lr": 0.88, "dt": 0.8, "knn": 0.82, "svm": 0.88} assert expected_train_acc[model_type] <= baseline_train_acc assert expected_test_acc[model_type] <= baseline_test_acc @@ -606,7 +606,7 @@ def transform_other_feature(x): ) expected_train_acc = {"nn": 0.81, "rf": 0.95, "gb": 0.95, "lr": 0.81, "dt": 0.94, "knn": 0.87, "svm": 0.81} - expected_test_acc = {"nn": 0.88, "rf": 0.82, "gb": 0.8, "lr": 0.88, "dt": 0.74, "knn": 0.86, "svm": 0.88} + expected_test_acc = {"nn": 0.88, "rf": 0.79, "gb": 0.8, "lr": 0.88, "dt": 0.74, "knn": 0.86, "svm": 0.88} assert expected_train_acc[model_type] <= baseline_train_acc assert expected_test_acc[model_type] <= baseline_test_acc diff --git a/tests/attacks/test_adversarial_patch.py b/tests/attacks/test_adversarial_patch.py index d181723864..ca850ff2f1 100644 --- a/tests/attacks/test_adversarial_patch.py +++ b/tests/attacks/test_adversarial_patch.py @@ -301,7 +301,7 @@ def test_4_pytorch(self): self.assertAlmostEqual(patch_adv[0, 8, 8], 0.5, delta=0.05) self.assertAlmostEqual(patch_adv[0, 14, 14], 0.5, delta=0.05) - self.assertAlmostEqual(float(np.sum(patch_adv)), 371.88014772999827, delta=4.0) + self.assertAlmostEqual(float(np.sum(patch_adv)), 367.6218066346819, delta=4.0) mask = np.ones((1, 28, 28)).astype(bool) attack_ap.apply_patch(x=x_train, scale=0.1, mask=mask) diff --git a/tests/attacks/test_copycat_cnn.py b/tests/attacks/test_copycat_cnn.py index a93a212be2..74c0c669ee 100644 --- a/tests/attacks/test_copycat_cnn.py +++ 
b/tests/attacks/test_copycat_cnn.py @@ -139,7 +139,7 @@ def test_keras_classifier(self): model.add(Dense(10, activation="softmax")) loss = keras.losses.categorical_crossentropy try: - from keras.optimizers import Adam + from keras.optimizers.legacy import Adam optimizer = Adam(lr=0.001) except ImportError: @@ -365,7 +365,7 @@ def test_keras_iris(self): model.add(Dense(10, activation="relu")) model.add(Dense(3, activation="softmax")) try: - from keras.optimizers import Adam + from keras.optimizers.legacy import Adam optimizer = Adam(lr=0.001) except ImportError: diff --git a/tests/attacks/test_poisoning_attack_svm.py b/tests/attacks/test_poisoning_attack_svm.py index 6104acd8d2..8c416b135a 100644 --- a/tests/attacks/test_poisoning_attack_svm.py +++ b/tests/attacks/test_poisoning_attack_svm.py @@ -85,7 +85,7 @@ def setUpIRIS(cls): order = np.random.permutation(n_sample) x_train = x_train[order] - y_train = y_train[order].astype(np.float) + y_train = y_train[order].astype(float) x_train = x_train[: int(0.9 * n_sample)] y_train = y_train[: int(0.9 * n_sample)] @@ -159,7 +159,9 @@ def test_SVC_kernels(self): clean.fit(x_train, y_train) poison = SklearnClassifier(model=SVC(kernel=kernel, gamma="auto"), clip_values=clip_values) poison.fit(x_train, y_train) - attack = PoisoningAttackSVM(poison, 0.01, 1.0, x_train, y_train, x_test, y_test, 100) + attack = PoisoningAttackSVM( + poison, step=0.01, eps=1.0, x_train=x_train, y_train=y_train, x_val=x_test, y_val=y_test, max_iter=100 + ) attack_y = np.array([1, 1]) - y_train[0] attack_point, _ = attack.poison(np.array([x_train[0]]), y=np.array([attack_y])) poison.fit( @@ -177,7 +179,18 @@ def test_SVC_kernels(self): self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001) def test_classifier_type_check_fail(self): - backend_test_classifier_type_check_fail(PoisoningAttackSVM, [ScikitlearnSVC]) + (x_train, y_train), (x_test, y_test), min_, max_ = self.iris + backend_test_classifier_type_check_fail( + PoisoningAttackSVM, + [ScikitlearnSVC], + step=0.01, + eps=1.0, + x_train=x_train, + y_train=y_train, + x_val=x_test, + y_val=y_test, + max_iter=100, + ) def test_check_params(self): (x_train, y_train), (x_test, y_test), min_, max_ = self.iris diff --git a/tests/classifiersFrameworks/test_tensorflow.py b/tests/classifiersFrameworks/test_tensorflow.py index d4a8617626..7288692417 100644 --- a/tests/classifiersFrameworks/test_tensorflow.py +++ b/tests/classifiersFrameworks/test_tensorflow.py @@ -239,7 +239,7 @@ def test_binary_keras_instantiation_and_attack_pgd(art_warning): ] ) model.summary() - model.compile(optimizer=tf.optimizers.Adam(), loss="binary_crossentropy", metrics=["accuracy"]) + model.compile(optimizer=tf.optimizers.legacy.Adam(), loss="binary_crossentropy", metrics=["accuracy"]) classifier = KerasClassifier(model=model) classifier.fit(train_x, train_y, nb_epochs=5) pred = classifier.predict(test_x) @@ -268,7 +268,7 @@ def test_binary_keras_instantiation_and_attack_pgd(art_warning): # ] # ) # model.summary() -# model.compile(optimizer=tf.optimizers.Adam(), loss="binary_crossentropy", metrics=["accuracy"]) +# model.compile(optimizer=tf.optimizers.legacy.Adam(), loss="binary_crossentropy", metrics=["accuracy"]) # classifier = art.estimators.classification.TensorFlowV2Classifier(model=model) # classifier.fit(train_x, train_y, nb_epochs=5) # pred = classifier.predict(test_x) diff --git a/tests/defences/detector/poison/test_activation_defence.py b/tests/defences/detector/poison/test_activation_defence.py index 
83d450d8f8..96e73590ec 100644 --- a/tests/defences/detector/poison/test_activation_defence.py +++ b/tests/defences/detector/poison/test_activation_defence.py @@ -20,7 +20,7 @@ import logging import unittest -from keras_preprocessing.image import ImageDataGenerator +from keras.preprocessing.image import ImageDataGenerator import numpy as np from art.data_generators import KerasDataGenerator @@ -118,7 +118,7 @@ def test_output_clusters(self): clusters_by_class, _ = self.defence.cluster_activations(nb_clusters=nb_clusters) # Verify expected number of classes - self.assertEqual(np.shape(clusters_by_class)[0], n_classes) + self.assertEqual(len(clusters_by_class), n_classes) # Check we get the expected number of clusters: found_clusters = len(np.unique(clusters_by_class[0])) self.assertEqual(found_clusters, nb_clusters) diff --git a/tests/defences/detector/poison/test_ground_truth_evaluator.py b/tests/defences/detector/poison/test_ground_truth_evaluator.py index 3d50ba2fdd..d575b16236 100644 --- a/tests/defences/detector/poison/test_ground_truth_evaluator.py +++ b/tests/defences/detector/poison/test_ground_truth_evaluator.py @@ -64,9 +64,9 @@ def test_analyze_correct_all_clean(self): # print(json_object) for i in range(self.n_classes): res_class_i = json_object["class_" + str(i)] - self.assertEqual(res_class_i["TruePositive"]["rate"], "N/A") + self.assertEqual(res_class_i["TruePositive"]["rate"], -1) self.assertEqual(res_class_i["TrueNegative"]["rate"], 100) - self.assertEqual(res_class_i["FalseNegative"]["rate"], "N/A") + self.assertEqual(res_class_i["FalseNegative"]["rate"], -1) self.assertEqual(res_class_i["FalsePositive"]["rate"], 0) self.assertEqual(res_class_i["TruePositive"]["numerator"], 0) @@ -99,9 +99,9 @@ def test_analyze_correct_all_poison(self): for i in range(self.n_classes): res_class_i = json_object["class_" + str(i)] self.assertEqual(res_class_i["TruePositive"]["rate"], 100) - self.assertEqual(res_class_i["TrueNegative"]["rate"], "N/A") + self.assertEqual(res_class_i["TrueNegative"]["rate"], -1) self.assertEqual(res_class_i["FalseNegative"]["rate"], 0) - self.assertEqual(res_class_i["FalsePositive"]["rate"], "N/A") + self.assertEqual(res_class_i["FalsePositive"]["rate"], -1) self.assertEqual(res_class_i["TruePositive"]["numerator"], self.n_dp) self.assertEqual(res_class_i["TruePositive"]["denominator"], self.n_dp) @@ -166,9 +166,9 @@ def test_analyze_fully_misclassified(self): for i in range(self.n_classes): res_class_i = json_object["class_" + str(i)] self.assertEqual(res_class_i["TruePositive"]["rate"], 0) - self.assertEqual(res_class_i["TrueNegative"]["rate"], "N/A") + self.assertEqual(res_class_i["TrueNegative"]["rate"], -1) self.assertEqual(res_class_i["FalseNegative"]["rate"], 100) - self.assertEqual(res_class_i["FalsePositive"]["rate"], "N/A") + self.assertEqual(res_class_i["FalsePositive"]["rate"], -1) self.assertEqual(res_class_i["TruePositive"]["numerator"], 0) self.assertEqual(res_class_i["TruePositive"]["denominator"], self.n_dp) @@ -200,9 +200,9 @@ def test_analyze_fully_misclassified_rev(self): pprint.pprint(json_object) for i in range(self.n_classes): res_class_i = json_object["class_" + str(i)] - self.assertEqual(res_class_i["TruePositive"]["rate"], "N/A") + self.assertEqual(res_class_i["TruePositive"]["rate"], -1) self.assertEqual(res_class_i["TrueNegative"]["rate"], 0) - self.assertEqual(res_class_i["FalseNegative"]["rate"], "N/A") + self.assertEqual(res_class_i["FalseNegative"]["rate"], -1) self.assertEqual(res_class_i["FalsePositive"]["rate"], 100) 
self.assertEqual(res_class_i["TruePositive"]["numerator"], 0) diff --git a/tests/defences/detector/poison/test_provenance_defence.py b/tests/defences/detector/poison/test_provenance_defence.py index bcf03cfc10..057b10172e 100644 --- a/tests/defences/detector/poison/test_provenance_defence.py +++ b/tests/defences/detector/poison/test_provenance_defence.py @@ -99,6 +99,7 @@ def setUpClass(cls): eps=1.0, x_val=valid_data, y_val=valid_labels, + max_iter=100, verbose=False, ) diff --git a/tests/defences/preprocessor/test_spatial_smoothing_tensorflow.py b/tests/defences/preprocessor/test_spatial_smoothing_tensorflow.py index cd7c5bc75d..5c420f93a1 100644 --- a/tests/defences/preprocessor/test_spatial_smoothing_tensorflow.py +++ b/tests/defences/preprocessor/test_spatial_smoothing_tensorflow.py @@ -116,7 +116,7 @@ def test_spatial_smoothing_video_data(art_warning, video_batch, channels_first): art_warning(e) -@pytest.mark.only_with_platform("tensorflow", "tensorflow2v1") +@pytest.mark.only_with_platform("tensorflow") def test_non_spatial_data_error(art_warning, tabular_batch): try: test_input = tabular_batch diff --git a/tests/defences/test_neural_cleanse.py b/tests/defences/test_neural_cleanse.py index 8ff885e4f8..34acc27925 100644 --- a/tests/defences/test_neural_cleanse.py +++ b/tests/defences/test_neural_cleanse.py @@ -64,7 +64,7 @@ def test_keras(self): from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten, Conv2D from tensorflow.keras.losses import CategoricalCrossentropy - from tensorflow.keras.optimizers import Adam + from tensorflow.keras.optimizers.legacy import Adam model = Sequential() model.add(Conv2D(filters=4, kernel_size=(5, 5), strides=1, activation="relu", input_shape=(28, 28, 1))) diff --git a/tests/defences/trainer/test_adversarial_trainer_awp_pytorch.py b/tests/defences/trainer/test_adversarial_trainer_awp_pytorch.py index 3940654be8..b7281b64a6 100644 --- a/tests/defences/trainer/test_adversarial_trainer_awp_pytorch.py +++ b/tests/defences/trainer/test_adversarial_trainer_awp_pytorch.py @@ -124,7 +124,7 @@ def test_adversarial_trainer_awppgd_pytorch_fit_and_predict(get_adv_trainer_awpp else: accuracy = np.sum(predictions == y_test_mnist) / x_test_mnist.shape[0] - trainer.fit(x_train_mnist, y_train_mnist, nb_epochs=20) + trainer.fit(x_train_mnist, y_train_mnist, nb_epochs=40) predictions_new = np.argmax(trainer.predict(x_test_mnist), axis=1) if label_format == "one_hot": diff --git a/tests/defences/trainer/test_certified_adversarial_trainer.py b/tests/defences/trainer/test_certified_adversarial_trainer.py index 3cbaa984e6..1f41bc18c1 100644 --- a/tests/defences/trainer/test_certified_adversarial_trainer.py +++ b/tests/defences/trainer/test_certified_adversarial_trainer.py @@ -213,7 +213,7 @@ def test_mnist_certified_loss(art_warning, fix_get_mnist_data): certified_loss += sample_loss - assert round(float(certified_loss.cpu().detach().numpy()), 4) == -309.2724 + assert float(certified_loss.cpu().detach().numpy()) == pytest.approx(-309.2724, abs=0.001) assert samples_certified == 94 # empirically check that PGD does not give a lower acc diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index bcae2c4844..cee00eda4e 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -164,7 +164,7 @@ def build_model(input_shape): return tf.keras.Model(inputs=img_inputs, 
outputs=x) loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) - optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) + optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.01) for dataset, dataset_name in zip([fix_get_mnist_data, fix_get_cifar10_data], ["mnist", "cifar"]): if dataset_name == "mnist": @@ -328,7 +328,7 @@ def get_weights(): net.set_weights(get_weights()) loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) - optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) + optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.01) try: for ablation_type in ["column", "block"]: diff --git a/tests/estimators/certification/test_macer.py b/tests/estimators/certification/test_macer.py index cbc6818e2f..94bd486140 100644 --- a/tests/estimators/certification/test_macer.py +++ b/tests/estimators/certification/test_macer.py @@ -57,7 +57,7 @@ def _get_classifier(): import tensorflow as tf classifier, _ = get_image_classifier_tf() - optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, name="SGD", decay=5e-4) + optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.01, momentum=0.9, name="SGD", decay=5e-4) scheduler = tf.keras.optimizers.schedules.PiecewiseConstantDecay([250, 400], [0.01, 0.001, 0.0001]) rs = TensorFlowV2MACER( model=classifier.model, diff --git a/tests/estimators/certification/test_smooth_adv.py b/tests/estimators/certification/test_smooth_adv.py index 38a0fb5a8d..48c7413659 100644 --- a/tests/estimators/certification/test_smooth_adv.py +++ b/tests/estimators/certification/test_smooth_adv.py @@ -57,7 +57,7 @@ def _get_classifier(): import tensorflow as tf classifier, _ = get_image_classifier_tf() - optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, name="SGD", decay=1e-4) + optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.01, momentum=0.9, name="SGD", decay=1e-4) scheduler = tf.keras.optimizers.schedules.PiecewiseConstantDecay([50, 100], [0.01, 0.001, 0.0001]) rs = TensorFlowV2SmoothAdv( model=classifier.model, diff --git a/tests/estimators/classification/test_deep_partition_ensemble.py b/tests/estimators/classification/test_deep_partition_ensemble.py index 4483464135..ac88f9cba6 100644 --- a/tests/estimators/classification/test_deep_partition_ensemble.py +++ b/tests/estimators/classification/test_deep_partition_ensemble.py @@ -27,7 +27,7 @@ from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D from tensorflow.keras.losses import categorical_crossentropy -from tensorflow.keras.optimizers import Adam +from tensorflow.keras.optimizers.legacy import Adam import torch.nn as nn import torch.nn.functional as F @@ -112,6 +112,7 @@ def call(self, x): model = TensorFlowModel() loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True) optimizer = Adam(learning_rate=0.01) + model.compile(loss=loss_object, optimizer=optimizer) classifier = TensorFlowV2Classifier( model=model, diff --git a/tests/estimators/classification/test_deeplearning_common.py b/tests/estimators/classification/test_deeplearning_common.py index 60eb82ceba..bf90292d6c 100644 --- a/tests/estimators/classification/test_deeplearning_common.py +++ b/tests/estimators/classification/test_deeplearning_common.py @@ -154,7 +154,7 @@ def test_loss_functions( art_warning(e) -@pytest.mark.skip_framework("non_dl_frameworks", "huggingface") +@pytest.mark.skip_framework("non_dl_frameworks", "huggingface", "tensorflow2", "kerastf") def 
test_pickle(art_warning, image_dl_estimator, image_dl_estimator_defended, tmp_path): try: full_path = os.path.join(tmp_path, "my_classifier.p") diff --git a/tests/estimators/classification/test_scikitlearn.py b/tests/estimators/classification/test_scikitlearn.py index ae5d457900..14fccaf1c5 100644 --- a/tests/estimators/classification/test_scikitlearn.py +++ b/tests/estimators/classification/test_scikitlearn.py @@ -269,7 +269,7 @@ def test_type(self): def test_predict(self): y_predicted = self.classifier.predict(self.x_test_iris[0:1]) y_expected = np.asarray([[0.07997696, 0.36272544, 0.5572976]]) - np.testing.assert_array_almost_equal(y_predicted, y_expected, decimal=4) + np.testing.assert_array_almost_equal(y_predicted, y_expected, decimal=3) def test_class_gradient_none_1(self): grad_predicted = self.classifier.class_gradient(self.x_test_iris[0:1], label=None) @@ -280,7 +280,7 @@ def test_class_gradient_none_1(self): [0.6508137, 0.26377308, 1.54522324, 0.80972391], ] ] - np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=4) + np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=3) def test_class_gradient_none_2(self): grad_predicted = self.classifier.class_gradient(self.x_test_iris[0:2], label=None) @@ -296,11 +296,11 @@ def test_class_gradient_none_2(self): [0.70875132, 0.25104877, 1.70929277, 0.88410652], ], ] - np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=4) + np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=3) def test_class_gradient_int_1(self): grad_predicted = self.classifier.class_gradient(self.x_test_iris[0:1], label=1) - grad_expected = [[[-0.56322294, -0.70427608, -0.98874801, -0.67053026]]] + grad_expected = [[[-0.56317311, -0.70493763, -0.98908609, -0.67106276]]] for i_shape in range(4): self.assertAlmostEqual(grad_predicted[0, 0, i_shape], grad_expected[0][0][i_shape], 3) @@ -308,14 +308,14 @@ def test_class_gradient_int_1(self): def test_class_gradient_int_2(self): grad_predicted = self.classifier.class_gradient(self.x_test_iris[0:2], label=1) grad_expected = [ - [[-0.56322294, -0.70427608, -0.98874801, -0.67053026]], - [[-0.50528532, -0.71700042, -0.82467848, -0.59614766]], + [[-0.56317306, -0.70493776, -0.98908573, -0.67106259]], + [[-0.50522697, -0.71762568, -0.82497531, -0.5966416]], ] np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=4) def test_class_gradient_list_1(self): grad_predicted = self.classifier.class_gradient(self.x_test_iris[0:1], label=[1]) - grad_expected = [[[-0.56322294, -0.70427608, -0.98874801, -0.67053026]]] + grad_expected = [[[-0.56317311, -0.70493763, -0.98874801, -0.67106276]]] for i_shape in range(4): self.assertAlmostEqual(grad_predicted[0, 0, i_shape], grad_expected[0][0][i_shape], 3) @@ -323,10 +323,10 @@ def test_class_gradient_list_1(self): def test_class_gradient_list_2(self): grad_predicted = self.classifier.class_gradient(self.x_test_iris[0:2], label=[1, 2]) grad_expected = [ - [[-0.56322294, -0.70427608, -0.98874801, -0.67053026]], - [[0.70875132, 0.25104877, 1.70929277, 0.88410652]], + [[-0.56317306, -0.70493776, -0.98908573, -0.67106259]], + [[0.70866591, 0.25158876, 1.70947325, 0.88450021]], ] - np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=4) + np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=3) def test_class_gradient_label_wrong_type(self): @@ -339,7 +339,7 @@ def test_class_gradient_label_wrong_type(self): def test_loss_gradient(self): 
grad_predicted = self.classifier.loss_gradient(self.x_test_iris[0:1], self.y_test_iris[0:1]) - grad_expected = np.asarray([[-0.21693791, -0.08792436, -0.51507443, -0.26990796]]) + grad_expected = np.asarray([[-0.21690657, -0.08809226, -0.51512082, -0.27002635]]) np.testing.assert_array_almost_equal(grad_predicted, grad_expected, decimal=4) def test_save(self): diff --git a/tests/estimators/speech_recognition/test_pytorch_deep_speech.py b/tests/estimators/speech_recognition/test_pytorch_deep_speech.py index dc49f214d2..b571c82fc2 100644 --- a/tests/estimators/speech_recognition/test_pytorch_deep_speech.py +++ b/tests/estimators/speech_recognition/test_pytorch_deep_speech.py @@ -170,7 +170,7 @@ def test_pytorch_deep_speech_preprocessor( # Test probability outputs probs, sizes = speech_recognizer.predict(x, batch_size=1, transcription_output=False) - np.testing.assert_array_almost_equal(probs[1][1], expected_probs, decimal=3) + np.testing.assert_array_almost_equal(probs[1][1], expected_probs, decimal=2) np.testing.assert_array_almost_equal(sizes, expected_sizes) # Test transcription outputs diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index a507e7e4da..3e5959b882 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -143,7 +143,9 @@ def _cnn_mnist_k(input_shape): model.add(Dense(10, activation="softmax")) model.compile( - loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01), metrics=["accuracy"] + loss=keras.losses.categorical_crossentropy, + optimizer=keras.optimizers.legacy.Adam(lr=0.01), + metrics=["accuracy"], ) classifier = KerasClassifier(model=model, clip_values=(0, 1), use_logits=False) @@ -219,7 +221,9 @@ def _create_krclassifier(): model.add(Dense(10, activation="softmax")) model.compile( - loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01), metrics=["accuracy"] + loss=keras.losses.categorical_crossentropy, + optimizer=keras.optimizers.legacy.Adam(lr=0.01), + metrics=["accuracy"], ) # Get the classifier diff --git a/tests/metrics/test_verification_decision_trees.py b/tests/metrics/test_verification_decision_trees.py index 02ddef54b7..fbbd4c2737 100644 --- a/tests/metrics/test_verification_decision_trees.py +++ b/tests/metrics/test_verification_decision_trees.py @@ -71,8 +71,8 @@ def test_XGBoost(self): x=self.x_test, y=self.y_test, eps_init=0.3, nb_search_steps=10, max_clique=2, max_level=2 ) - self.assertEqual(average_bound, 0.03186914062500001) - self.assertEqual(verified_error, 0.99) + self.assertAlmostEqual(average_bound, second=0.03335742187499999, places=6) + self.assertEqual(verified_error, second=0.99) def test_LightGBM(self): train_data = lightgbm.Dataset(self.x_train, label=np.argmax(self.y_train, axis=1)) @@ -92,7 +92,7 @@ def test_LightGBM(self): "verbose": 0, } - model = lightgbm.train(parameters, train_data, valid_sets=test_data, num_boost_round=2, early_stopping_rounds=1) + model = lightgbm.train(parameters, train_data, valid_sets=test_data, num_boost_round=2) classifier = LightGBMClassifier(model=model) diff --git a/tests/utils.py b/tests/utils.py index 59b6b78cfe..4e32c32497 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -494,7 +494,8 @@ def get_image_classifier_tf_v2(from_logits=False): loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=from_logits, reduction=tf.keras.losses.Reduction.SUM ) - optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) + + optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=0.01) 
model.compile(optimizer=optimizer, loss=loss_object) @@ -640,7 +641,7 @@ def get_image_classifier_kr( loss = loss_name elif loss_type == "function_losses": if from_logits: - if int(keras.__version__.split(".")[0]) == 2 and int(keras.__version__.split(".")[1]) >= 3: + if is_tf23_keras24: def categorical_crossentropy(y_true, y_pred): return keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) @@ -704,7 +705,7 @@ def sparse_categorical_crossentropy(y_true, y_pred): else: raise ValueError("Loss name not recognised.") - model.compile(loss=loss, optimizer=keras.optimizers.Adam(lr=0.01), metrics=["accuracy"]) + model.compile(loss=loss, optimizer=keras.optimizers.legacy.Adam(lr=0.01), metrics=["accuracy"]) # Get classifier krc = KerasClassifier(model, clip_values=(0, 1), use_logits=from_logits) @@ -962,7 +963,7 @@ def sparse_categorical_crossentropy(y_true, y_pred): else: raise ValueError("Loss name not recognised.") - model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(lr=0.01), metrics=["accuracy"]) + model.compile(loss=loss, optimizer=tf.keras.optimizers.legacy.Adam(lr=0.01), metrics=["accuracy"]) # Get classifier krc = KerasClassifier(model, clip_values=(0, 1), use_logits=from_logits) @@ -1000,7 +1001,7 @@ def get_image_classifier_kr_tf_binary(): [_kr_tf_weights_loader("MNIST_BINARY", "W", "DENSE"), _kr_tf_weights_loader("MNIST_BINARY", "B", "DENSE")] ) - model.compile(loss="binary_crossentropy", optimizer=tf.keras.optimizers.Adam(lr=0.01), metrics=["accuracy"]) + model.compile(loss="binary_crossentropy", optimizer=tf.keras.optimizers.legacy.Adam(lr=0.01), metrics=["accuracy"]) # Get classifier krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False) @@ -1723,7 +1724,7 @@ def call(self, x): model = TensorFlowModel() loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) - optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) + optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=0.01) # Create the classifier tfc = TensorFlowV2Classifier( @@ -1885,7 +1886,9 @@ def get_tabular_classifier_kr(load_init=True): model.add(Dense(10, activation="relu")) model.add(Dense(3, activation="softmax")) - model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(lr=0.001), metrics=["accuracy"]) + model.compile( + loss="categorical_crossentropy", optimizer=keras.optimizers.legacy.Adam(lr=0.001), metrics=["accuracy"] + ) # Get classifier krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False, channels_first=True) @@ -1978,7 +1981,7 @@ def get_tabular_regressor_kr(load_init=True): model.add(Dense(10, activation="relu")) model.add(Dense(1)) - model.compile(loss="mean_squared_error", optimizer=keras.optimizers.Adam(lr=0.001), metrics=["accuracy"]) + model.compile(loss="mean_squared_error", optimizer=keras.optimizers.legacy.Adam(lr=0.001), metrics=["accuracy"]) # Get regressor krc = KerasRegressor(model)
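One last pattern that recurs in the test hunks above: exact float assertions (`round(float(x), 4) == -309.2724`, `assertEqual(average_bound, 0.0318...)`) are replaced with tolerance-based comparisons, which is what keeps them stable across the dependency bumps in requirements_test.txt. A minimal pytest sketch with an illustrative value, not a value from the actual tests:

import numpy as np
import pytest

def test_certified_loss_is_close():
    certified_loss = np.float64(-309.27243)  # stand-in for a value computed by the trainer
    # An absolute tolerance instead of round()/assertEqual on exact floats:
    assert float(certified_loss) == pytest.approx(-309.2724, abs=0.001)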