diff --git a/.github/model-analysis-config.sh b/.github/model-analysis-config.sh new file mode 100644 index 000000000..6e3f3286b --- /dev/null +++ b/.github/model-analysis-config.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 + +# If set to true, it will set the environment variables for models ops test generation otherwise markdown generation env variables will be set +GENERATE_MODELS_OPS_TEST=$1 + +# Declare an associative array to store environment variables +declare -A env_vars + +# Markdown Generation +# 1) PR config +env_vars["BRANCH_NAME"]="model_analysis" +env_vars["COMMIT_MESSAGE"]="Update model analysis documentation" +env_vars["TITLE"]="Update model analysis documentation" +env_vars["BODY"]="This PR will update model analysis documentation." +env_vars["OUTPUT_PATH"]="model_analysis_docs/" + +# 2) Script config +env_vars["MARDOWN_DIR_PATH"]="./model_analysis_docs" +env_vars["SCRIPT_OUTPUT_LOG"]="model_analysis.log" + + +# Model ops test generation +# 1) Script config +env_vars["MODELS_OPS_TEST_OUTPUT_DIR_PATH"]="forge/test" +env_vars["MODELS_OPS_TEST_PACKAGE_NAME"]="models_ops" + + +# Common Config for markdown generation and model ops test generation +env_vars["TEST_DIR_OR_FILE_PATH"]="forge/test/models" +env_vars["UNIQUE_OPS_OUTPUT_DIR_PATH"]="./models_unique_ops_output" + + +# If GENERATE_MODELS_OPS_TEST is set to true, Modify the PR config to model ops test generation. +if [[ "$GENERATE_MODELS_OPS_TEST" == "true" ]]; then + env_vars["BRANCH_NAME"]="generate_models_ops_test" + env_vars["COMMIT_MESSAGE"]="Generate and update models ops tests" + env_vars["TITLE"]="Generate and update models ops tests" + env_vars["BODY"]="This PR will generate models ops tests by extracting the unique ops configurations across all the pytorch models present inside the forge/test/models directory path." 
+ env_vars["OUTPUT_PATH"]="forge/test/models_ops/" + env_vars["SCRIPT_OUTPUT_LOG"]="generate_models_ops_test.log" +fi + + +for key in "${!env_vars[@]}"; do + echo "$key=${env_vars[$key]}" +done diff --git a/.github/workflows/model-analysis-weekly.yml b/.github/workflows/model-analysis-weekly.yml index cd520f1a1..8a062905c 100644 --- a/.github/workflows/model-analysis-weekly.yml +++ b/.github/workflows/model-analysis-weekly.yml @@ -6,116 +6,8 @@ on: - cron: '0 23 * * 5' # 11:00 PM UTC Friday (12:00 AM Saturday Serbia) jobs: - - docker-build: - uses: ./.github/workflows/build-image.yml + model-analysis-weekly: + uses: ./.github/workflows/model-analysis.yml secrets: inherit - - model-analysis: - needs: docker-build - runs-on: runner - timeout-minutes: 10080 # Set job execution time to 7 days(default: 6 hours) - - container: - image: ${{ needs.docker-build.outputs.docker-image }} - options: --device /dev/tenstorrent/0 - volumes: - - /dev/hugepages:/dev/hugepages - - /dev/hugepages-1G:/dev/hugepages-1G - - /etc/udev/rules.d:/etc/udev/rules.d - - /lib/modules:/lib/modules - - /opt/tt_metal_infra/provisioning/provisioning_env:/opt/tt_metal_infra/provisioning/provisioning_env - - env: - GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - - steps: - - - name: Set reusable strings - id: strings - shell: bash - run: | - echo "work-dir=$(pwd)" >> "$GITHUB_OUTPUT" - echo "build-output-dir=$(pwd)/build" >> "$GITHUB_OUTPUT" - - - name: Git safe dir - run: git config --global --add safe.directory ${{ steps.strings.outputs.work-dir }} - - - uses: actions/checkout@v4 - with: - submodules: recursive - fetch-depth: 0 # Fetch all history and tags - token: ${{ env.GITHUB_TOKEN }} - - # Clean everything from submodules (needed to avoid issues - # with cmake generated files leftover from previous builds) - - name: Cleanup submodules - run: | - git submodule foreach --recursive git clean -ffdx - git submodule foreach --recursive git reset --hard - - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - create-symlink: true - key: model-analysis-${{ runner.os }} - - - name: Build - shell: bash - run: | - source env/activate - cmake -G Ninja \ - -B ${{ steps.strings.outputs.build-output-dir }} \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_C_COMPILER=clang \ - -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER_LAUNCHER=ccache \ - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - cmake --build ${{ steps.strings.outputs.build-output-dir }} - - - name: Run Model Analysis Script - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - HF_HUB_DISABLE_PROGRESS_BARS: 1 - shell: bash - run: | - source env/activate - apt-get update - apt install -y libgl1 libglx-mesa0 - set -o pipefail # Ensures that the exit code reflects the first command that fails - python scripts/model_analysis.py \ - --test_directory_or_file_path forge/test/models/pytorch \ - --dump_failure_logs \ - --markdown_directory_path ./model_analysis_docs \ - --unique_ops_output_directory_path ./models_unique_ops_output \ - 2>&1 | tee model_analysis.log - - - name: Upload Model Analysis Script Logs - uses: actions/upload-artifact@v4 - if: success() || failure() - with: - name: model-analysis-outputs - path: model_analysis.log - - - name: Upload Models Unique Ops test Failure Logs - uses: actions/upload-artifact@v4 - if: success() || failure() - with: - name: unique-ops-logs - path: ./models_unique_ops_output - - - name: Create Pull Request - uses: peter-evans/create-pull-request@v7 - with: - branch: model_analysis - committer: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com> - author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> - base: main - commit-message: "Update model analysis docs" - title: "Update model analysis docs" - body: "This PR will update model analysis docs" - labels: automatic_model_analysis - delete-branch: true - token: ${{ env.GITHUB_TOKEN }} - add-paths: | - model_analysis_docs/ + with: + generate_models_ops_test: false diff --git a/.github/workflows/model-analysis.yml b/.github/workflows/model-analysis.yml new file mode 100644 index 000000000..53da81a94 --- /dev/null +++ b/.github/workflows/model-analysis.yml @@ -0,0 +1,155 @@ +name: Model Analysis + +on: + workflow_dispatch: + inputs: + generate_models_ops_test: + description: 'If set to True, it will generate models ops test by extracting the unique ops config across all the models otherwise it will run the model analysis and generate markdown files' + required: false + type: boolean + default: false + workflow_call: + inputs: + generate_models_ops_test: + description: 'If set to True, it will generate models ops test by extracting the unique ops config across all the models otherwise it will run the model analysis and generate markdown files' + required: false + type: boolean + default: false + +jobs: + + docker-build: + uses: ./.github/workflows/build-image.yml + secrets: inherit + + model-analysis: + needs: docker-build + runs-on: runner + timeout-minutes: 4320 # Set job execution time to 3 days(default: 6 hours) + + container: + image: ${{ needs.docker-build.outputs.docker-image }} + options: --device /dev/tenstorrent/0 + volumes: + - /dev/hugepages:/dev/hugepages + - /dev/hugepages-1G:/dev/hugepages-1G + - /etc/udev/rules.d:/etc/udev/rules.d + - /lib/modules:/lib/modules + - /opt/tt_metal_infra/provisioning/provisioning_env:/opt/tt_metal_infra/provisioning/provisioning_env + + env: + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + HF_TOKEN: ${{ secrets.HF_TOKEN }} + HF_HUB_DISABLE_PROGRESS_BARS: 1 + + steps: + + - name: Set reusable strings + id: strings + shell: bash + run: | + echo "work-dir=$(pwd)" >> "$GITHUB_OUTPUT" + echo "build-output-dir=$(pwd)/build" >> "$GITHUB_OUTPUT" + + - name: Git safe dir + run: git config --global --add safe.directory ${{ steps.strings.outputs.work-dir }} + + - uses: actions/checkout@v4 + with: + submodules: recursive + fetch-depth: 0 # Fetch all history and tags + token: ${{ env.GITHUB_TOKEN }} + + # Clean everything from submodules (needed to avoid issues + # with cmake generated files leftover from previous builds) + - name: Cleanup submodules + run: | + git submodule foreach --recursive git clean -ffdx + git submodule foreach --recursive git reset --hard + + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + create-symlink: true + key: model-analysis-${{ runner.os }} + + - name: Set environment variables + shell: bash + run: | + OUTPUT=$(bash .github/model-analysis-config.sh ${{ inputs.generate_models_ops_test }}) + # Assign the script output to GitHub environment variables + echo "$OUTPUT" | while IFS= read -r line; do + echo "$line" >> $GITHUB_ENV + done + + - name: Build + shell: bash + run: | + source env/activate + cmake -G Ninja \ + -B ${{ steps.strings.outputs.build-output-dir }} \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache + cmake --build ${{ steps.strings.outputs.build-output-dir }} + + - name: Run Model Analysis Script + if: ${{ 
!inputs.generate_models_ops_test }} + shell: bash + run: | + source env/activate + apt-get update + apt install -y libgl1 libglx-mesa0 + set -o pipefail # Ensures that the exit code reflects the first command that fails + python scripts/model_analysis/run_analysis_and_generate_md_files.py \ + --test_directory_or_file_path ${{ env.TEST_DIR_OR_FILE_PATH }} \ + --dump_failure_logs \ + --markdown_directory_path ${{ env.MARDOWN_DIR_PATH }} \ + --unique_ops_output_directory_path ${{ env.UNIQUE_OPS_OUTPUT_DIR_PATH }} \ + 2>&1 | tee ${{ env.SCRIPT_OUTPUT_LOG }} + + - name: Generate Models Ops test + if: ${{ inputs.generate_models_ops_test }} + shell: bash + run: | + source env/activate + apt-get update + apt install -y libgl1 libglx-mesa0 + set -o pipefail # Ensures that the exit code reflects the first command that fails + python scripts/model_analysis/generate_models_ops_test.py \ + --test_directory_or_file_path ${{ env.TEST_DIR_OR_FILE_PATH }} \ + --unique_ops_output_directory_path ${{ env.UNIQUE_OPS_OUTPUT_DIR_PATH }} \ + --models_ops_test_output_directory_path ${{ env.MODELS_OPS_TEST_OUTPUT_DIR_PATH }} \ + --models_ops_test_package_name ${{ env.MODELS_OPS_TEST_PACKAGE_NAME }} \ + 2>&1 | tee ${{ env.SCRIPT_OUTPUT_LOG }} + + - name: Upload Script Output Logs + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: script-outputs + path: ${{ env.SCRIPT_OUTPUT_LOG }} + + - name: Upload Models Unique Ops test Failure Logs + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: unique-ops-logs + path: ${{ env.UNIQUE_OPS_OUTPUT_DIR_PATH }} + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + branch: ${{ env.BRANCH_NAME }} + committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> + author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> + base: main + commit-message: ${{ env.COMMIT_MESSAGE }} + title: ${{ env.TITLE }} + body: ${{ env.BODY }} + delete-branch: true + token: ${{ env.GITHUB_TOKEN }} + add-paths: | + ${{ env.OUTPUT_PATH }} diff --git a/forge/forge/config.py b/forge/forge/config.py index 314f2a636..0680d3bbe 100644 --- a/forge/forge/config.py +++ b/forge/forge/config.py @@ -184,16 +184,19 @@ class CompilerConfig: # Number of patterns to match for each module tvm_module_to_num_patterns: Dict[str, int] = field(default_factory=lambda: dict()) - # If enabled, for given test, it generates Forge Modules in form of PyTest for each unique operation configuration within the given module. + # If enabled, for given test, it only extracts the unique operation configuration. + extract_tvm_unique_ops_config: bool = False + + # If enabled, for given test, it extracts the unique operation configuration and generates Forge Modules in form of PyTest for each unique operation configuration within the given module. 
# Each configuration is based on: # - Operand Type (e.g., Activation, Parameter, Constant) # - Operand Shape # - Operand DataType # - Operation Arguments (if any) - tvm_generate_unique_op_tests: bool = False + tvm_generate_unique_ops_tests: bool = False - # Export the generated unique operations configurations information with test file path to the excel file - export_tvm_generated_unique_op_tests_details: bool = False + # Export the unique operations configurations information to the excel file + export_tvm_unique_ops_config_details: bool = False # Enables a transform for conv that directly reads input, such that it goes from stride > 1 to stride = 1 # This usually translates to lower DRAM BW and less math as the input better populates tiles @@ -359,9 +362,9 @@ def apply_env_config_overrides(self): os.environ["FORGE_OVERRIDE_DEVICE_YAML"] ) - if "FORGE_EXPORT_TVM_GENERATED_UNIQUE_OP_TESTS_DETAILS" in os.environ: - self.export_tvm_generated_unique_op_tests_details = bool( - int(os.environ["FORGE_EXPORT_TVM_GENERATED_UNIQUE_OP_TESTS_DETAILS"]) + if "FORGE_EXPORT_TVM_UNIQUE_OPS_CONFIG_DETAILS" in os.environ: + self.export_tvm_unique_ops_config_details = bool( + int(os.environ["FORGE_EXPORT_TVM_UNIQUE_OPS_CONFIG_DETAILS"]) ) def __post_init__(self): diff --git a/forge/forge/module.py b/forge/forge/module.py index ce084c2d8..fd193ca19 100644 --- a/forge/forge/module.py +++ b/forge/forge/module.py @@ -722,7 +722,13 @@ def add_parameter(self, name: str, parameter: Parameter, prepend_name: bool = Fa else: parameter._set_auto_name(name) - def add_constant(self, name: str, prepend_name: bool = False, shape: Tuple[int] = None): + def add_constant( + self, + name: str, + prepend_name: bool = False, + shape: Optional[Tuple[int, ...]] = None, + dtype: torch.dtype = torch.float32, + ): """ Adds a new constant. 
@@ -733,6 +739,12 @@ def add_constant(self, name: str, prepend_name: bool = False, shape: Tuple[int] prepend_name: Bool Whether to prepend module name to constant name + + shape: Optional[Tuple[int, ...]] + Shape of the constant tensor + + dtype: torch.dtype + Datatype of the constant tensor """ if name in self._constants: @@ -741,7 +753,7 @@ def add_constant(self, name: str, prepend_name: bool = False, shape: Tuple[int] _name = name if shape: - self._constants[_name] = Tensor.create_from_torch(torch.empty(shape), constant=True) + self._constants[_name] = Tensor.create_from_torch(torch.empty(shape, dtype=dtype), constant=True) else: self._constants[_name] = None diff --git a/forge/forge/python_codegen.py b/forge/forge/python_codegen.py index 4aab10d92..deae9e9d6 100644 --- a/forge/forge/python_codegen.py +++ b/forge/forge/python_codegen.py @@ -10,7 +10,7 @@ import forge from forge.tensor import forge_dataformat_to_pytorch_dtype -from typing import Tuple, List +from typing import Any, Dict, List, Optional, Tuple, Union def forge_df_from_str(df: str, name: str, return_as_str: bool = True): @@ -156,6 +156,7 @@ def write_header(self, include_pytest_imports=False): self.wl("from forge import Tensor, compile") self.wl("from forge.verify.compare import compare_with_golden") self.wl("from forge.verify.verify import verify") + self.wl("from forge.verify.value_checkers import AutomaticValueChecker") self.wl("from forge.verify.config import VerifyConfig") self.wl("import pytest") @@ -207,11 +208,12 @@ def write_class_definition(self, params, constants, class_name=None, num_submode for const in constants.values(): name = const[0] shape = tuple(const[1]) + dtype = pytorch_df_str_from_str(const[2], name) self.const_names.append(name) if is_submodel: - self.wl(f'self.add_constant("{name}", prepend_name=True, shape={shape})') + self.wl(f'self.add_constant("{name}", prepend_name=True, shape={shape}, dtype={dtype})') else: - self.wl(f'self.add_constant("{name}", shape={shape})') + self.wl(f'self.add_constant("{name}", shape={shape}, dtype={dtype})') self.indent = 0 self.wl("") @@ -1024,14 +1026,19 @@ def write_model_parameter_function(self, param_file_name, named_params_file_name def write_pytest_function( self, forge_module_names: List[str], - framework: str, - pytest_input_shapes_and_dtypes_list: List[List[Tuple]], + pytest_input_shapes_and_dtypes_list: List[List[Union[Tuple[Any, ...], str]]], + markers: Optional[List[str]] = None, + module_metadata: Optional[Dict[str, Any]] = None, + pytest_metadata_list: Optional[List[Dict[str, Any]]] = None, + use_ids_function: bool = False, + include_random_parameter_constant_gen: bool = False, ): """ Generates a pytest function that tests modules with input shapes and data types. This function writes a pytest function that: - 1. Creates a list of forge module names, their associated input shapes, and data types into a pytest parameter list. + 1. Creates a list of forge module names, their associated input shapes, data types, and metadata (i.e. the model and variant name that the shapes and dtypes belong to) into a pytest parameter list. + 2. Creates record_property fixtures for the metadata passed from the pytest parameters and directly initializes properties with the name and value from the module_metadata argument 2. Creates inputs(i.e TensorFromPyTorch) for the forge module by calling the create_from_shape Tensor class method with shapes and dtypes from the pytest parameter. 3.
Initializes the framework model using the forge module from the pytest parameter and call the `process_framework_parameters` function for module. 4. Runs the framework model with the created inputs. @@ -1041,28 +1048,73 @@ def write_pytest_function( Args: forge_module_names (List[str]): List of names of the modules to be tested, each corresponding to a forge module. - framework (str): The name of the framework under which the model is to be tested (e.g., "pytorch"). - pytest_input_shapes_and_dtypes_list (List[List[Tuple]]): A list of input shapes and corresponding data types for each module. Each tuple contains the shape and dtype to be tested. + pytest_input_shapes_and_dtypes_list (List[List[Union[Tuple[Any, ...], str]]]): A list of input shapes and corresponding data types for each module. Each tuple contains the shape and dtype to be tested. + markers (Optional[List[str]]): A list of pytest markers that will be added above the test function. + module_metadata (Optional[Dict[str, Any]]): A dictionary containing metadata about the test function. Each key-value pair represents a metadata property name and its corresponding value, which will be recorded using the `record_property` pytest fixtures. + pytest_metadata_list (Optional[List[Dict[str, Any]]]): A list of dictionaries containing metadata for each pytest parameter. + use_ids_function (bool): If set, the forge module name and the shapes and dtypes will be used as the id for the pytest parameter. + include_random_parameter_constant_gen (bool): If set, it will include the code for generating random tensors and assigning them to the forge module parameters and constants """ self.wl("") self.wl("") + if use_ids_function: + self.wl("def ids_func(param):") + self.indent += 1 + self.wl("forge_module = param[0]") + self.wl("shapes_dtypes = param[1]") + self.wl('return str(forge_module.__name__) + "-" + str(shapes_dtypes)') + self.indent -= 1 + self.wl("") self.wl("forge_modules_and_shapes_dtypes_list = [") self.indent += 1 - for forge_module_name, pytest_input_shapes_and_dtypes in zip( - forge_module_names, pytest_input_shapes_and_dtypes_list + is_pytest_metadata_list_empty = False + if pytest_metadata_list is None: + pytest_metadata_list = [None] * len(pytest_input_shapes_and_dtypes_list) + is_pytest_metadata_list_empty = True + for forge_module_name, pytest_input_shapes_and_dtypes, pytest_metadata in zip( + forge_module_names, pytest_input_shapes_and_dtypes_list, pytest_metadata_list ): pytest_input_shapes_and_dtypes = [ (shape, forge_dataformat_to_pytorch_dtype(forge_df_from_str(dtype, "", False))) for shape, dtype in pytest_input_shapes_and_dtypes ] - self.wl(f"({forge_module_name}, {pytest_input_shapes_and_dtypes}), ") + if pytest_metadata is None: + self.wl(f"({forge_module_name}, {pytest_input_shapes_and_dtypes}), ") + else: + self.wl(f"({forge_module_name}, {pytest_input_shapes_and_dtypes}, {pytest_metadata}), ") self.indent -= 1 self.wl("]") - self.wl('@pytest.mark.parametrize("forge_module_and_shapes_dtypes", forge_modules_and_shapes_dtypes_list)') - self.wl("def test_module(forge_module_and_shapes_dtypes):") + if markers is not None: + for marker in markers: + self.wl(f"@pytest.mark.{marker}") + if use_ids_function: + self.wl( + '@pytest.mark.parametrize("forge_module_and_shapes_dtypes", forge_modules_and_shapes_dtypes_list, ids=ids_func)' + ) + else: + self.wl('@pytest.mark.parametrize("forge_module_and_shapes_dtypes", forge_modules_and_shapes_dtypes_list)') + if module_metadata is not None or not is_pytest_metadata_list_empty: + self.wl("def
test_module(forge_module_and_shapes_dtypes, record_property):") + else: + self.wl("def test_module(forge_module_and_shapes_dtypes):") self.indent += 1 + if module_metadata is not None: + for metadata_name, metadata_value in module_metadata.items(): + if isinstance(metadata_value, str): + self.wl(f'record_property("{metadata_name}", "{metadata_value}")') + else: + self.wl(f'record_property("{metadata_name}", {metadata_value})') self.wl("") - self.wl("forge_module, operand_shapes_dtypes = forge_module_and_shapes_dtypes") + if is_pytest_metadata_list_empty: + self.wl("forge_module, operand_shapes_dtypes = forge_module_and_shapes_dtypes") + else: + self.wl("forge_module, operand_shapes_dtypes, metadata = forge_module_and_shapes_dtypes") + self.wl('pcc = metadata.pop("pcc")') + self.wl("") + self.wl("for metadata_name, metadata_value in metadata.items():") + self.indent += 1 + self.wl(f"record_property(metadata_name, metadata_value)") + self.indent -= 1 self.wl("") need_model_parameter_function = any( [ @@ -1082,10 +1134,32 @@ def write_pytest_function( self.wl("") self.wl(f"framework_model = forge_module(forge_module.__name__)") self.wl("framework_model.process_framework_parameters()") + if include_random_parameter_constant_gen: + self.wl("") + self.wl("for name, parameter in framework_model._parameters.items():") + self.indent += 1 + self.wl( + "parameter_tensor = Tensor.create_torch_tensor(shape=parameter.shape.get_pytorch_shape(), dtype=parameter.pt_data_format)" + ) + self.wl("framework_model.set_parameter(name, parameter_tensor)") + self.indent -= 1 + self.wl("") + self.wl("for name, constant in framework_model._constants.items():") + self.indent += 1 + self.wl( + "constant_tensor = Tensor.create_torch_tensor(shape=constant.shape.get_pytorch_shape(), dtype=constant.pt_data_format)" + ) + self.wl("framework_model.set_constant(name, constant_tensor)") + self.indent -= 1 self.wl("") self.wl("compiled_model = compile(framework_model, sample_inputs=inputs)") self.wl("") - self.wl("verify(inputs, framework_model, compiled_model)") + if is_pytest_metadata_list_empty: + self.wl("verify(inputs, framework_model, compiled_model)") + else: + self.wl( + "verify(inputs, framework_model, compiled_model, VerifyConfig(value_checker=AutomaticValueChecker(pcc=pcc)))" + ) self.wl("") self.wl("") self.indent -= 1 diff --git a/forge/forge/tensor.py b/forge/forge/tensor.py index 8c8b657e6..4a0a48418 100644 --- a/forge/forge/tensor.py +++ b/forge/forge/tensor.py @@ -274,21 +274,35 @@ def create_from_torch( """ return TensorFromPytorch(torch_tensor, dev_data_format, constant) + @classmethod + def create_torch_tensor( + cls, + shape: Union[List[int], Tuple[int, ...], torch.Size], + dtype: Optional[torch.dtype] = None, + integer_tensor_high_value: int = 1000, + ) -> torch.Tensor: + + if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]: + torch_tensor = torch.rand(shape, dtype=dtype) + elif dtype in [torch.int8, torch.int16, torch.int32, torch.int64]: + torch_tensor = torch.randint(high=integer_tensor_high_value, size=shape, dtype=dtype) + else: + torch_tensor = torch.rand(shape, dtype=torch.float32) + + return torch_tensor + @classmethod def create_from_shape( cls, - tensor_shape: Union[List, Tuple, torch.Size], + tensor_shape: Union[List[int], Tuple[int, ...], torch.Size], torch_dtype: Optional[torch.dtype] = None, integer_tensor_high_value: int = 1000, constant: bool = False, ) -> "TensorFromPytorch": - if torch_dtype in [torch.float16, torch.bfloat16, torch.float32]: - torch_tensor = 
torch.rand(tensor_shape, dtype=torch_dtype) - elif torch_dtype in [torch.int8, torch.int, torch.int32]: - torch_tensor = torch.randint(high=integer_tensor_high_value, size=tensor_shape, dtype=torch_dtype) - else: - torch_tensor = torch.rand(tensor_shape, dtype=torch.float32) + torch_tensor = Tensor.create_torch_tensor( + shape=tensor_shape, dtype=torch_dtype, integer_tensor_high_value=integer_tensor_high_value + ) return TensorFromPytorch( torch_tensor, dev_data_format=pytorch_dtype_to_forge_dataformat(torch_dtype), constant=constant diff --git a/forge/forge/tvm_to_python.py b/forge/forge/tvm_to_python.py index f6b1896da..67e4349ff 100644 --- a/forge/forge/tvm_to_python.py +++ b/forge/forge/tvm_to_python.py @@ -25,7 +25,7 @@ import importlib from forge.python_codegen import PyTorchWriter, ForgeWriter, PythonWriter, pytorch_df_str_from_str -from forge.tvm_unique_op_generation import Operation, NodeType, generate_unique_op_tests +from forge.tvm_unique_op_generation import Operation, NodeType, extract_and_generate_unique_ops_tests def import_from_path(module_name, file_path): @@ -2105,7 +2105,9 @@ def generate_forge_module( else: forge_mod = TestClass(writer.module_name) - if isinstance(framework_mod, forge.module.PyTorchModule) and compiler_cfg.tvm_generate_unique_op_tests: + if isinstance(framework_mod, forge.module.PyTorchModule) and ( + compiler_cfg.extract_tvm_unique_ops_config or compiler_cfg.tvm_generate_unique_ops_tests + ): forge_mod.process_framework_parameters() else: forge_mod.process_framework_parameters(framework_mod.module) @@ -2751,7 +2753,9 @@ def delete_unneeded_outputs(ops, returns): param_file_name = os.path.join(writer.module_directory, writer.module_name + "_params.pt") torch.save(params_from_tvm, param_file_name) - if framework == "pytorch" and compiler_cfg.tvm_generate_unique_op_tests: + if framework == "pytorch" and ( + compiler_cfg.extract_tvm_unique_ops_config or compiler_cfg.tvm_generate_unique_ops_tests + ): # Store named parameters named_params_file_name = os.path.join(writer.module_directory, writer.module_name + "_named_params.pt") named_parameters = dict(framework_mod.module.state_dict().items()) @@ -2777,8 +2781,8 @@ def delete_unneeded_outputs(ops, returns): # Generate unique op tests based on requested model. Currently only supported # for PyTorch framework. - if compiler_cfg.tvm_generate_unique_op_tests: - generate_unique_op_tests( + if compiler_cfg.extract_tvm_unique_ops_config or compiler_cfg.tvm_generate_unique_ops_tests: + extract_and_generate_unique_ops_tests( ops, current_module_name, framework, diff --git a/forge/forge/tvm_unique_op_generation.py b/forge/forge/tvm_unique_op_generation.py index 433c45c85..7f05e9e1f 100644 --- a/forge/forge/tvm_unique_op_generation.py +++ b/forge/forge/tvm_unique_op_generation.py @@ -372,6 +372,7 @@ def create_unique_operations( ops: Dict[int, Operation], named_parameters: Dict[str, torch.Tensor], node_name_to_node_type: Optional[Dict[str, NodeType]] = None, + use_constant_value: bool = True, ): """ Creates unique operations by mapping operand and argument information to forge op names. @@ -380,6 +381,7 @@ def create_unique_operations( ops (dict): Dictionary of operation. named_parameters (dict): Mapping of node name to model parameters and buffers. node_name_to_node_type (dict): Mapping of node names to types. + use_constant_value (bool): If set to true, replace the constant node operand shape with the constant tensor value from named_parameters Returns: UniqueOperations: Populated UniqueOperations dictionary.
@@ -408,10 +410,11 @@ def create_unique_operations( ), "Operands names, shape, dtypes are not equal" # Replace constant node operand shape with constant value for comparing with other constant value. - operand_shapes = [ - named_parameters[operand_name] if operand_type == NodeType.Constant else operand_shape - for operand_type, operand_shape, operand_name in zip(operand_types, operand_shapes, operand_names) - ] + if use_constant_value: + operand_shapes = [ + named_parameters[operand_name] if operand_type == NodeType.Constant else operand_shape + for operand_type, operand_shape, operand_name in zip(operand_types, operand_shapes, operand_names) + ] new_operands = OperandsInfo(operand_types, operand_shapes, operand_dtypes) new_args = OpArgs(args) if forge_op_function_name in unique_operations.keys(): @@ -423,6 +426,39 @@ def create_unique_operations( return unique_operations + def create_list_of_dict(self): + unique_operation_details = [] + for forge_op_function_name in sorted(self): + unique_operands_and_opargs_opmetadata = self[ + forge_op_function_name + ].get_unique_operands_and_opargs_opmetadata() + for operands, opargs_opmetadata in unique_operands_and_opargs_opmetadata: + for args, operation_metadata in opargs_opmetadata.get_op_args_and_metadata(): + operand_types = operands.get_operand_types() + operand_shapes = operands.get_operand_shapes() + operand_dtypes = operands.get_operand_dtypes() + operand_names = operation_metadata["operand_names"][0] + operation_info = {} + operation_info["Op"] = forge_op_function_name + operation_info["Operand_Names"] = str(operand_names) + operation_info["Operand_Shapes"] = str( + [ + operand_name if operand_type == NodeType.Constant else operand_shape + for operand_type, operand_shape, operand_name in zip( + operand_types, operand_shapes, operand_names + ) + ] + ) + operation_info["Operand_Types"] = str( + [NodeType.to_json(operand_type) for operand_type in operand_types] + ) + operation_info["Operand_Dtypes"] = str(operand_dtypes) + operation_info["Args"] = str(args) + # Assign empty string since no unique ops test is generated + operation_info["Testfile"] = "" + unique_operation_details.append(operation_info) + return unique_operation_details + def __str__(self): if len(self) > 0: unique_operations_info = "" @@ -440,36 +476,34 @@ def export_unique_op_configuration_info(module_name, unique_operation_data, uniq for operation_info in unique_operation_data: rows.append([operation_info[header] for header in headers]) - export_tvm_generated_unique_op_tests_details_dir_path = os.getenv( - "FORGE_EXPORT_TVM_GENERATED_UNIQUE_OP_TESTS_DETAILS_DIR_PATH", f"generated_modules/unique_ops/" - ) - export_tvm_generated_unique_op_tests_details_dir_path = os.path.join( - export_tvm_generated_unique_op_tests_details_dir_path, module_name + export_tvm_unique_ops_details_dir_path = os.getenv( + "FORGE_EXPORT_TVM_UNIQUE_OPS_CONFIG_DETAILS_DIR_PATH", f"generated_modules/unique_ops/" ) - if not os.path.exists(export_tvm_generated_unique_op_tests_details_dir_path): - os.makedirs(export_tvm_generated_unique_op_tests_details_dir_path) + export_tvm_unique_ops_details_dir_path = os.path.join(export_tvm_unique_ops_details_dir_path, module_name) + if not os.path.exists(export_tvm_unique_ops_details_dir_path): + os.makedirs(export_tvm_unique_ops_details_dir_path) - export_tvm_generated_unique_op_tests_details_file_path = os.path.join( - export_tvm_generated_unique_op_tests_details_dir_path, - "tvm_generated_unique_op_test_details.xlsx", + export_tvm_unique_ops_details_file_path = 
os.path.join( + export_tvm_unique_ops_details_dir_path, + "tvm_generated_unique_ops_config_details.xlsx", ) - unique_ops_metadata_path = os.path.join( - export_tvm_generated_unique_op_tests_details_dir_path, + unique_ops_metadata_file_path = os.path.join( + export_tvm_unique_ops_details_dir_path, "tvm_generated_unique_ops_metadata.json", ) - with open(unique_ops_metadata_path, "w") as json_file: + with open(unique_ops_metadata_file_path, "w") as json_file: json.dump(unique_ops_metadata, json_file, indent=4) create_excel_file( title=module_name, headers=headers, rows=rows, - output_file_path=export_tvm_generated_unique_op_tests_details_file_path, + output_file_path=export_tvm_unique_ops_details_file_path, ) -def generate_unique_op_tests( +def extract_and_generate_unique_ops_tests( ops, current_module_name, framework, @@ -502,6 +536,7 @@ def generate_unique_op_tests( # Extract unique operations by comparing operands types, shapes and dtypes and arguments if any unique_operations = UniqueOperations.create_unique_operations(ops, named_parameters, node_name_to_node_type) + logger.info(f"UniqueOperations:\n{unique_operations}") def get_param_const(name): for nid, param in params.items(): @@ -512,8 +547,284 @@ def get_param_const(name): return nid, const logger.error(f"There is no paramter/constant with the name {name}") - unique_operation_details = [] - for op_idx, forge_op_function_name in enumerate(sorted(unique_operations)): + if compiler_cfg.tvm_generate_unique_ops_tests: + + unique_operation_details = [] + for op_idx, forge_op_function_name in enumerate(sorted(unique_operations)): + + # Extract operation name from forge op function name + op_name = forge_op_function_name.split(".")[-1].lower() + + module_name = "test_" + op_name + + # Initialize Forge writer and generate header with pytest specific imports + writer = ForgeWriter( + module_name, + framework, + module_directory=f"generated_modules/unique_ops/{current_module_name}", + contains_incompatible_np_floats=contains_incompatible_np_floats, + delete_inputs=delete_inputs, + ) + writer.write_header(include_pytest_imports=True) + + # Get the unique operands and operation arguments assiocated the operand names + unique_operands_and_opargs_opmetadata = unique_operations[ + forge_op_function_name + ].get_unique_operands_and_opargs_opmetadata() + + pytest_input_shapes_and_dtypes_list = [] + forge_module_names = [] + module_idx = 0 + forge_module_list = [] + test_count = 0 + for operands_idx, (operands, opargs_opmetadata) in enumerate(unique_operands_and_opargs_opmetadata): + + for args_idx, (args, operation_metadata) in enumerate(opargs_opmetadata.get_op_args_and_metadata()): + + operand_types = operands.get_operand_types() + operand_shapes = operands.get_operand_shapes() + operand_dtypes = operands.get_operand_dtypes() + operand_names = operation_metadata["operand_names"][0] + + if compiler_cfg.export_tvm_unique_ops_config_details: + operation_info = {} + operation_info["Op"] = forge_op_function_name + operation_info["Operand_Names"] = str(operand_names) + operation_info["Operand_Shapes"] = str( + [ + operand_name if operand_type == NodeType.Constant else operand_shape + for operand_type, operand_shape, operand_name in zip( + operand_types, operand_shapes, operand_names + ) + ] + ) + operation_info["Operand_Types"] = str( + [NodeType.to_json(operand_type) for operand_type in operand_types] + ) + operation_info["Operand_Dtypes"] = str(operand_dtypes) + operation_info["Args"] = str(args) + + # Check if all operands types are parameters or 
constants and change the operand type from + # parameters or constants to activation and pass it as activation to forge module forward function + all_params_const = all( + [ + True if (operand_type == NodeType.Parameter or operand_type == NodeType.Constant) else False + for operand_type in operand_types + ] + ) + if all_params_const: + operand_types = [NodeType.Activation] * len(operand_types) + operand_shapes = operand_names + operand_names = [op_name + "_input_" + str(idx) for idx in range(len(operand_names))] + + # Check if an existing Forge module matches the current operation configuration. + # This involves comparing the number of inputs, operand types, activation operand count, + # and arguments. If a match is found, further checks are made to ensure that the parameter + # shapes and data types, or constants, match as well. If a match is found for either parameters + # or constants, the new Forge module creation is skipped. If no match is found, a new Forge module + # will be created for the current operation configuration. + need_to_create_forge_module = True + for forge_mod in forge_module_list: + if ( + len(forge_mod["operand_types"]) == len(operand_types) + and forge_mod["operand_types"] == operand_types + ): + if ( + forge_mod["number_of_activation"] + == len( + list( + filter(lambda operand_type: operand_type == NodeType.Activation, operand_types) + ) + ) + and forge_mod["args"] == args + ): + param_shape_dtype_list = [ + (operand_shape, operand_dtype) + for operand_type, operand_shape, operand_dtype in zip( + operand_types, operand_shapes, operand_dtypes + ) + if operand_type == NodeType.Parameter + ] + + const_list = [ + operand_shape + for operand_type, operand_shape in zip(operand_types, operand_shapes) + if operand_type == NodeType.Constant + ] + + if forge_mod["number_of_parameters"] > 0 and len(param_shape_dtype_list) > 0: + if len(param_shape_dtype_list) == forge_mod["number_of_parameters"]: + params_shape_dtype_equal = all( + [ + True if (shape1 == shape2 and dtype1 == dtype2) else False + for (shape1, dtype1), (shape2, dtype2) in zip( + forge_mod["param_shape_dtype_list"], param_shape_dtype_list + ) + ] + ) + if params_shape_dtype_equal: + need_to_create_forge_module = False + forge_module_names.append(forge_mod["class_name"]) + break + elif forge_mod["number_of_constants"] > 0 and len(const_list) > 0: + if len(const_list) == forge_mod["number_of_constants"]: + const_equal = all( + [ + True if torch.equal(const1, const2) else False + for const1, const2 in zip(forge_mod["const_list"], const_list) + ] + ) + if const_equal: + need_to_create_forge_module = False + forge_module_names.append(forge_mod["class_name"]) + break + else: + need_to_create_forge_module = False + forge_module_names.append(forge_mod["class_name"]) + break + + # If no matching Forge module was found, create a new one for the current operation configuration + if need_to_create_forge_module: + + # Generate class name and append it forge_module_names list for using it as pytest parameter. + class_name = current_module_name.lower() + op_name + str(module_idx) + class_name = class_name.title().replace("_", "") + forge_module_names.append(class_name) + + needed_params = {} + needed_consts = {} + params_shape_dtype_list = [] + const_list = [] + forward_method_inputs = {} + new_operand_names = [] + + # Iterate through operand types and names to classify them as parameters, constants, or activations. 
+ # Collect the necessary parameters and constants, and use them to generate the class definition and + # handle activations for the forward method inputs. + for idx, (operand_type, operand_name) in enumerate(zip(operand_types, operand_names)): + if operand_type == NodeType.Parameter: + nid, param_tuple = get_param_const(operand_name) + needed_params[nid] = param_tuple + params_shape_dtype_list.append([param_tuple[1], param_tuple[3]]) + new_operand_names.append(operand_name) + elif operand_type == NodeType.Constant: + nid, const_tuple = get_param_const(operand_name) + needed_consts[nid] = const_tuple + const_list.append(named_parameters[operand_name]) + new_operand_names.append(operand_name) + else: + if operand_name not in forward_method_inputs.values(): + forward_method_inputs[idx] = operand_name + else: + forward_method_inputs[idx] = op_name + "_input_" + str(idx) + logger.warning( + f"operand_name {operand_name} is already present in the forward_method_inputs {forward_method_inputs}" + ) + new_operand_names.append(forward_method_inputs[idx]) + + # Generate the class definition with the collected parameters and constants. + writer.write_class_definition( + params=needed_params, constants=needed_consts, class_name=class_name + ) + + # Create a single operation with the function name, output name, + # input operand names, and arguments and use it for generating forward method + single_op = { + args_idx: Operation( + function_name=forge_op_function_name, + output_name=op_name + "_output_1", + input_names=new_operand_names, + args=tuple(args.items()), + ) + } + + forward_method_returns = {args_idx: single_op[args_idx].output_name} + + # Generate forge module forward function + writer.write_forward(single_op, forward_method_inputs, forward_method_returns) + + # If there are any parameters or constants, generate the parameter parser function. 
+ if len(needed_params) != 0 or len(needed_consts) != 0: + writer.write_param_parser( + param_names, param_file_name, named_params_file_name, named_buffers_file_name + ) + + module_idx += 1 + forge_module_list.append( + { + "class_name": class_name, + "operand_types": operand_types, + "number_of_activation": len(forward_method_inputs), + "number_of_parameters": len(needed_params), + "number_of_constants": len(needed_consts), + "param_shape_dtype_list": params_shape_dtype_list, + "const_list": const_list, + "args": args, + } + ) + + # Collect activation input shapes and dtypes for using it in pytest parameter + pytest_input_shapes_dtypes = [] + for operand_type, operand_shape, operand_dtype in zip( + operand_types, operand_shapes, operand_dtypes + ): + if operand_type == NodeType.Activation: + pytest_input_shapes_dtypes.append((operand_shape, operand_dtype)) + pytest_input_shapes_and_dtypes_list.append(pytest_input_shapes_dtypes) + + if compiler_cfg.export_tvm_unique_ops_config_details: + operation_info["Testfile"] = ( + writer.module_directory + + "/" + + writer.filename + + f"::test_module[forge_module_and_shapes_dtypes{test_count}]" + ) + unique_operation_details.append(operation_info) + test_count += 1 + + # If the parameter/constant is passed as activation, operand shape will be replaced with operand name + # because instead of generating tensor from shape, use actual tensor from model parameters/buffers + # and so generating function for loading the model parameters/buffers and saving it as named_parameter variable + need_model_parameter_function = any( + [ + True if isinstance(shape, str) else False + for pytest_input_shapes_dtypes in pytest_input_shapes_and_dtypes_list + for shape, _ in pytest_input_shapes_dtypes + ] + ) + if need_model_parameter_function: + writer.write_model_parameter_function(param_file_name, named_params_file_name, named_buffers_file_name) + + # Generate pytest function for the operation with a pytest parameter containing a list of tuples + # and each tuple contains the module name, a tuple of operand shape/name and dtype + writer.write_pytest_function( + forge_module_names=forge_module_names, + pytest_input_shapes_and_dtypes_list=pytest_input_shapes_and_dtypes_list, + ) + + writer.close_file() + + else: + unique_operation_details = unique_operations.create_list_of_dict() + + if compiler_cfg.export_tvm_unique_ops_config_details: + unique_ops_metadata = { + "framework": framework, + "module_name": current_module_name, + "param_file_name": param_file_name, + "named_params_file_name": named_params_file_name, + "named_buffers_file_name": named_buffers_file_name, + } + export_unique_op_configuration_info(current_module_name, unique_operation_details, unique_ops_metadata) + + +def generate_models_ops_test(unique_operations: UniqueOperations, models_ops_test_output_directory_path: str): + """ + Generate models ops test forge modules with test functions from the provided unique operation configurations extracted across all the models + """ + + # Iterate over the unique operations dictionary after sorting it by operation name.
+ for forge_op_function_name in sorted(unique_operations): # Extract operation name from forge op function name op_name = forge_op_function_name.split(".")[-1].lower() @@ -523,14 +834,12 @@ def get_param_const(name): # Initialize Forge writer and generate header with pytest specific imports writer = ForgeWriter( module_name, - framework, - module_directory=f"generated_modules/unique_ops/{current_module_name}", - contains_incompatible_np_floats=contains_incompatible_np_floats, - delete_inputs=delete_inputs, + framework="pytorch", # Currently unique operation extraction is supported for pytorch framework so explicitly specifying the framework as pytorch + module_directory=models_ops_test_output_directory_path, ) writer.write_header(include_pytest_imports=True) - # Get the unique operands and operation arguments assiocated the operand names + # Get the unique operands and operation arguments assiocated with the operation metadata unique_operands_and_opargs_opmetadata = unique_operations[ forge_op_function_name ].get_unique_operands_and_opargs_opmetadata() @@ -539,33 +848,13 @@ def get_param_const(name): forge_module_names = [] module_idx = 0 forge_module_list = [] - test_count = 0 for operands_idx, (operands, opargs_opmetadata) in enumerate(unique_operands_and_opargs_opmetadata): for args_idx, (args, operation_metadata) in enumerate(opargs_opmetadata.get_op_args_and_metadata()): - operand_types = operands.get_operand_types() operand_shapes = operands.get_operand_shapes() + operand_types = operands.get_operand_types() operand_dtypes = operands.get_operand_dtypes() - operand_names = operation_metadata["operand_names"][0] - - if compiler_cfg.export_tvm_generated_unique_op_tests_details: - operation_info = {} - operation_info["Op"] = forge_op_function_name - operation_info["Operand_Names"] = str(operand_names) - operation_info["Operand_Shapes"] = str( - [ - operand_name if operand_type == NodeType.Constant else operand_shape - for operand_type, operand_shape, operand_name in zip( - operand_types, operand_shapes, operand_names - ) - ] - ) - operation_info["Operand_Types"] = str( - [NodeType.to_json(operand_type) for operand_type in operand_types] - ) - operation_info["Operand_Dtypes"] = str(operand_dtypes) - operation_info["Args"] = str(args) # Check if all operands types are parameters or constants and change the operand type from # parameters or constants to activation and pass it as activation to forge module forward function @@ -577,8 +866,6 @@ def get_param_const(name): ) if all_params_const: operand_types = [NodeType.Activation] * len(operand_types) - operand_shapes = operand_names - operand_names = [op_name + "_input_" + str(idx) for idx in range(len(operand_names))] # Check if an existing Forge module matches the current operation configuration. 
# This involves comparing the number of inputs, operand types, activation operand count, @@ -606,10 +893,11 @@ def get_param_const(name): ) if operand_type == NodeType.Parameter ] - - const_list = [ - operand_shape - for operand_type, operand_shape in zip(operand_types, operand_shapes) + const_shape_dtype_list = [ + (operand_shape, operand_dtype) + for operand_type, operand_shape, operand_dtype in zip( + operand_types, operand_shapes, operand_dtypes + ) if operand_type == NodeType.Constant ] @@ -627,15 +915,17 @@ def get_param_const(name): need_to_create_forge_module = False forge_module_names.append(forge_mod["class_name"]) break - elif forge_mod["number_of_constants"] > 0 and len(const_list) > 0: - if len(const_list) == forge_mod["number_of_constants"]: - const_equal = all( + elif forge_mod["number_of_constants"] > 0 and len(const_shape_dtype_list) > 0: + if len(const_shape_dtype_list) == forge_mod["number_of_constants"]: + const_shape_dtype_equal = all( [ - True if torch.equal(const1, const2) else False - for const1, const2 in zip(forge_mod["const_list"], const_list) + True if (shape1 == shape2 and dtype1 == dtype2) else False + for (shape1, dtype1), (shape2, dtype2) in zip( + forge_mod["const_shape_dtype_list"], const_shape_dtype_list + ) ] ) - if const_equal: + if const_shape_dtype_equal: need_to_create_forge_module = False forge_module_names.append(forge_mod["class_name"]) break @@ -648,40 +938,38 @@ def get_param_const(name): if need_to_create_forge_module: # Generate class name and append it forge_module_names list for using it as pytest parameter. - class_name = current_module_name.lower() + op_name + str(module_idx) + class_name = op_name + str(module_idx) class_name = class_name.title().replace("_", "") forge_module_names.append(class_name) needed_params = {} needed_consts = {} params_shape_dtype_list = [] - const_list = [] + const_shape_dtype_list = [] forward_method_inputs = {} - new_operand_names = [] + operand_names = [] - # Iterate through operand types and names to classify them as parameters, constants, or activations. + # Iterate through operand types to classify them as parameters, constants, or activations. # Collect the necessary parameters and constants, and use them to generate the class definition and # handle activations for the forward method inputs. 
- for idx, (operand_type, operand_name) in enumerate(zip(operand_types, operand_names)): + for idx, (operand_type, operand_shape, operand_dtype) in enumerate( + zip(operand_types, operand_shapes, operand_dtypes) + ): if operand_type == NodeType.Parameter: - nid, param_tuple = get_param_const(operand_name) - needed_params[nid] = param_tuple - params_shape_dtype_list.append([param_tuple[1], param_tuple[3]]) - new_operand_names.append(operand_name) + parameter_name = class_name.lower() + ".weight_" + str(idx) + param_tuple = (parameter_name, operand_shape, True, operand_dtype) + needed_params[idx] = param_tuple + params_shape_dtype_list.append([operand_shape, operand_dtype]) + operand_names.append(parameter_name) elif operand_type == NodeType.Constant: - nid, const_tuple = get_param_const(operand_name) - needed_consts[nid] = const_tuple - const_list.append(named_parameters[operand_name]) - new_operand_names.append(operand_name) + constant_name = class_name.lower() + "_const_" + str(idx) + const_tuple = (constant_name, operand_shape, operand_dtype) + needed_consts[idx] = const_tuple + const_shape_dtype_list.append([operand_shape, operand_dtype]) + operand_names.append(constant_name) else: - if operand_name not in forward_method_inputs.values(): - forward_method_inputs[idx] = operand_name - else: - forward_method_inputs[idx] = op_name + "_input_" + str(idx) - logger.warning( - f"operand_name {operand_name} is already present in the forward_method_inputs {forward_method_inputs}" - ) - new_operand_names.append(forward_method_inputs[idx]) + forward_method_inputs[idx] = op_name + "_input_" + str(idx) + operand_names.append(forward_method_inputs[idx]) # Generate the class definition with the collected parameters and constants. writer.write_class_definition(params=needed_params, constants=needed_consts, class_name=class_name) @@ -692,7 +980,7 @@ def get_param_const(name): args_idx: Operation( function_name=forge_op_function_name, output_name=op_name + "_output_1", - input_names=new_operand_names, + input_names=operand_names, args=tuple(args.items()), ) } @@ -702,12 +990,6 @@ def get_param_const(name): # Generate forge module forward function writer.write_forward(single_op, forward_method_inputs, forward_method_returns) - # If there are any parameters or constants, generate the parameter parser function. 
- if len(needed_params) != 0 or len(needed_consts) != 0: - writer.write_param_parser( - param_names, param_file_name, named_params_file_name, named_buffers_file_name - ) - module_idx += 1 forge_module_list.append( { @@ -717,7 +999,7 @@ def get_param_const(name): "number_of_parameters": len(needed_params), "number_of_constants": len(needed_consts), "param_shape_dtype_list": params_shape_dtype_list, - "const_list": const_list, + "const_shape_dtype_list": const_shape_dtype_list, "args": args, } ) @@ -729,45 +1011,17 @@ def get_param_const(name): pytest_input_shapes_dtypes.append((operand_shape, operand_dtype)) pytest_input_shapes_and_dtypes_list.append(pytest_input_shapes_dtypes) - if compiler_cfg.export_tvm_generated_unique_op_tests_details: - operation_info["Testfile"] = ( - writer.module_directory - + "/" - + writer.filename - + f"::test_module[forge_module_and_shapes_dtypes{test_count}]" - ) - unique_operation_details.append(operation_info) - test_count += 1 - - # If the parameter/constant is passed as activation, operand shape will be replaced with operand name - # because instead of generating tensor from shape, use actual tensor from model parameters/buffers - # and so generating function for loading the model parameters/buffers and saving it as named_parameter variable - need_model_parameter_function = any( - [ - True if isinstance(shape, str) else False - for pytest_input_shapes_dtypes in pytest_input_shapes_and_dtypes_list - for shape, _ in pytest_input_shapes_dtypes - ] - ) - if need_model_parameter_function: - writer.write_model_parameter_function(param_file_name, named_params_file_name, named_buffers_file_name) + # List of markers that will be added at the top of the test function + markers = ["push"] # Generate pytest function for the operation with pytest parameter containing list of tuple # and each tuple constaints module name, tuple of operand shape/name and dtype writer.write_pytest_function( - forge_module_names, - framework, - pytest_input_shapes_and_dtypes_list, + forge_module_names=forge_module_names, + pytest_input_shapes_and_dtypes_list=pytest_input_shapes_and_dtypes_list, + markers=markers, + use_ids_function=True, + include_random_parameter_constant_gen=True, ) writer.close_file() - - if compiler_cfg.export_tvm_generated_unique_op_tests_details: - unique_ops_metadata = { - "framework": framework, - "module_name": current_module_name, - "param_file_name": param_file_name, - "named_params_file_name": named_params_file_name, - "named_buffers_file_name": named_buffers_file_name, - } - export_unique_op_configuration_info(current_module_name, unique_operation_details, unique_ops_metadata) diff --git a/forge/test/conftest.py b/forge/test/conftest.py index c20e48477..00adb7fe2 100644 --- a/forge/test/conftest.py +++ b/forge/test/conftest.py @@ -101,10 +101,16 @@ def clear_forge(): def pytest_addoption(parser): parser.addoption( - "--generate-unique-op-tests", + "--generate-unique-ops-tests", action="store_true", default=False, - help="Generate unique op tests for the given model", + help="Generate unique ops tests for the given model", + ) + parser.addoption( + "--extract-tvm-unique-ops-config", + action="store_true", + default=False, + help="Extract the tvm unique op configuration for the given model", ) parser.addoption( "--silicon-only", action="store_true", default=False, help="run silicon tests only, skip golden/model" @@ -182,9 +188,11 @@ def initialize_global_compiler_configuration_based_on_pytest_args(pytestconfig): """ compiler_cfg = _get_global_compiler_config() -
compiler_cfg.tvm_generate_unique_op_tests = pytestconfig.getoption("--generate-unique-op-tests") + compiler_cfg.tvm_generate_unique_ops_tests = pytestconfig.getoption("--generate-unique-ops-tests") + + compiler_cfg.extract_tvm_unique_ops_config = pytestconfig.getoption("--extract-tvm-unique-ops-config") - if compiler_cfg.tvm_generate_unique_op_tests: + if compiler_cfg.tvm_generate_unique_ops_tests or compiler_cfg.extract_tvm_unique_ops_config: # For running standalone tests, we need to retain the generated python files # together with stored model parameters compiler_cfg.retain_tvm_python_files = True @@ -449,11 +457,11 @@ def pytest_collection_modifyitems(config, items): marker = config.getoption("-m") # Get the marker from the -m option - if marker and marker == "model_analysis": # If a marker is specified + if marker and marker == "not skip_model_analysis": # If a marker is specified print("Automatic Model Analysis Collected tests: ") test_count = 0 for item in items: - if marker in item.keywords: + if "skip_model_analysis" not in item.keywords: test_file_path = item.location[0] test_name = item.location[2] print(f"{test_file_path}::{test_name}") diff --git a/forge/test/models/onnx/vision/ddrnet/test_ddrnet.py b/forge/test/models/onnx/vision/ddrnet/test_ddrnet.py index bcae2c0ab..0286dd18e 100644 --- a/forge/test/models/onnx/vision/ddrnet/test_ddrnet.py +++ b/forge/test/models/onnx/vision/ddrnet/test_ddrnet.py @@ -15,6 +15,7 @@ variants = ["ddrnet23s", "ddrnet23", "ddrnet39"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly @@ -77,6 +78,7 @@ def test_ddrnet(variant, test_device): variants = ["ddrnet_23_slim_1024"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/dla/test_dla.py b/forge/test/models/onnx/vision/dla/test_dla.py index 7d796840b..b7aef7144 100644 --- a/forge/test/models/onnx/vision/dla/test_dla.py +++ b/forge/test/models/onnx/vision/dla/test_dla.py @@ -29,6 +29,7 @@ ] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/fpn/test_fpn.py b/forge/test/models/onnx/vision/fpn/test_fpn.py index 5b8f3ebcb..b17d483e7 100644 --- a/forge/test/models/onnx/vision/fpn/test_fpn.py +++ b/forge/test/models/onnx/vision/fpn/test_fpn.py @@ -10,6 +10,7 @@ from forge import DepricatedVerifyConfig +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.nightly def test_fpn_onnx(test_device, test_kind): diff --git a/forge/test/models/onnx/vision/hardnet/test_hardnet.py b/forge/test/models/onnx/vision/hardnet/test_hardnet.py index 5ff03361d..478a368a4 100644 --- a/forge/test/models/onnx/vision/hardnet/test_hardnet.py +++ b/forge/test/models/onnx/vision/hardnet/test_hardnet.py @@ -16,6 +16,7 @@ variants = ["hardnet68", "hardnet85", "hardnet68ds", "hardnet39ds"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/lstm/test_lstm_genom.py b/forge/test/models/onnx/vision/lstm/test_lstm_genom.py index b4136ce63..7d5968c4a 100644 --- a/forge/test/models/onnx/vision/lstm/test_lstm_genom.py +++ b/forge/test/models/onnx/vision/lstm/test_lstm_genom.py @@ -14,6 +14,7 @@ from test.utils 
import download_model +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.nightly def test_lstm_genom_onnx(test_device): diff --git a/forge/test/models/onnx/vision/lstm/test_lstm_valence.py b/forge/test/models/onnx/vision/lstm/test_lstm_valence.py index a28f13352..2b3acf880 100644 --- a/forge/test/models/onnx/vision/lstm/test_lstm_valence.py +++ b/forge/test/models/onnx/vision/lstm/test_lstm_valence.py @@ -13,6 +13,7 @@ from forge.verify.config import TestKind +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.nightly def test_lstm_valence_onnx(test_device): diff --git a/forge/test/models/onnx/vision/perceiverio/test_perceiverio.py b/forge/test/models/onnx/vision/perceiverio/test_perceiverio.py index 1e73157a1..0518c5b5c 100644 --- a/forge/test/models/onnx/vision/perceiverio/test_perceiverio.py +++ b/forge/test/models/onnx/vision/perceiverio/test_perceiverio.py @@ -25,6 +25,7 @@ def get_sample_data(model_name): return pixel_values +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize( "model_name", diff --git a/forge/test/models/onnx/vision/retinanet/test_retinanet.py b/forge/test/models/onnx/vision/retinanet/test_retinanet.py index 36b519a9e..1f38cceee 100644 --- a/forge/test/models/onnx/vision/retinanet/test_retinanet.py +++ b/forge/test/models/onnx/vision/retinanet/test_retinanet.py @@ -49,6 +49,7 @@ def img_preprocess(scal_val=1): ######### +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.nightly def test_retinanet_r101_640x480_onnx(test_device): @@ -121,6 +122,7 @@ def img_preprocessing(): ] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/segformer/test_segformer_imgcls.py b/forge/test/models/onnx/vision/segformer/test_segformer_imgcls.py index 5a3ce85bf..f74776e2c 100644 --- a/forge/test/models/onnx/vision/segformer/test_segformer_imgcls.py +++ b/forge/test/models/onnx/vision/segformer/test_segformer_imgcls.py @@ -33,6 +33,7 @@ def get_sample_data(model_name): ] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants_img_classification) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/segformer/test_segformer_semseg.py b/forge/test/models/onnx/vision/segformer/test_segformer_semseg.py index 0c11fdd3a..f484feec0 100644 --- a/forge/test/models/onnx/vision/segformer/test_segformer_semseg.py +++ b/forge/test/models/onnx/vision/segformer/test_segformer_semseg.py @@ -32,6 +32,7 @@ def get_sample_data(model_name): ] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants_semseg) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/yolo/test_yolo_v3.py b/forge/test/models/onnx/vision/yolo/test_yolo_v3.py index e7eeba93d..161e14a64 100644 --- a/forge/test/models/onnx/vision/yolo/test_yolo_v3.py +++ b/forge/test/models/onnx/vision/yolo/test_yolo_v3.py @@ -45,6 +45,7 @@ def preprocess(img): ######### +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="While loop in model, not supported yet") @pytest.mark.nightly def test_yolov3_tiny_onnx(test_device): @@ -78,6 +79,7 @@ def test_yolov3_tiny_onnx(test_device): ) +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="While loop in model, not supported yet") @pytest.mark.nightly def 
test_yolov3_onnx(test_device): diff --git a/forge/test/models/onnx/vision/yolo/test_yolo_v5.py b/forge/test/models/onnx/vision/yolo/test_yolo_v5.py index ea3cb6361..15f0e5b55 100644 --- a/forge/test/models/onnx/vision/yolo/test_yolo_v5.py +++ b/forge/test/models/onnx/vision/yolo/test_yolo_v5.py @@ -58,6 +58,7 @@ def data_preprocessing(ims: Image.Image, size: tuple) -> tuple: variants = ["yolov5n", "yolov5s", "yolov5m", "yolov5l", "yolov5x"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly @@ -105,6 +106,7 @@ def test_yolo_v5_320x320_onnx(test_device, variant): variants = ["yolov5n", "yolov5s", "yolov5m", "yolov5l", "yolov5x"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly @@ -187,6 +189,7 @@ def test_yolo_v5_480x480_onnx(test_device, variant): variants = ["yolov5n", "yolov5s", "yolov5m", "yolov5l", "yolov5x"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/onnx/vision/yolo/test_yolo_x.py b/forge/test/models/onnx/vision/yolo/test_yolo_x.py index b0417b6b1..4f4c712fa 100644 --- a/forge/test/models/onnx/vision/yolo/test_yolo_x.py +++ b/forge/test/models/onnx/vision/yolo/test_yolo_x.py @@ -38,6 +38,7 @@ def preprocess(img, input_size, swap=(2, 0, 1)): variants = ["yolox_nano", "yolox_tiny", "yolox_s", "yolox_m", "yolox_l", "yolox_darknet", "yolox_x"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Not supported") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/pytorch/audio/stereo/test_stereo.py b/forge/test/models/pytorch/audio/stereo/test_stereo.py index 557092d77..1c3d42234 100644 --- a/forge/test/models/pytorch/audio/stereo/test_stereo.py +++ b/forge/test/models/pytorch/audio/stereo/test_stereo.py @@ -19,7 +19,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) @pytest.mark.xfail(reason="[optimized_graph] Trying to access element outside of dimensions: 3") def test_stereo(variant): diff --git a/forge/test/models/pytorch/audio/whisper/test_whisper_0.py b/forge/test/models/pytorch/audio/whisper/test_whisper_0.py index e9c50b8d1..9dce75fed 100644 --- a/forge/test/models/pytorch/audio/whisper/test_whisper_0.py +++ b/forge/test/models/pytorch/audio/whisper/test_whisper_0.py @@ -99,7 +99,6 @@ def forward(self, decoder_input_ids, encoder_hidden_states): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_whisper(test_device, variant): @@ -113,6 +112,7 @@ def test_whisper(test_device, variant): ) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") @@ -174,6 +174,7 @@ def test_whisper_pipeline(test_device, variant): assert cpu_out["text"] == tt_out["text"] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Not supported") diff --git a/forge/test/models/pytorch/audio/whisper/test_whisper_1.py b/forge/test/models/pytorch/audio/whisper/test_whisper_1.py index 38612dbe4..c9b2fbfd3 100644 --- a/forge/test/models/pytorch/audio/whisper/test_whisper_1.py +++ b/forge/test/models/pytorch/audio/whisper/test_whisper_1.py @@ -40,6 +40,7 @@ 
] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") @@ -73,6 +74,7 @@ def test_whisper_dec_past_cache(test_device, variant): break +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="not supported yet") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -345,6 +347,7 @@ def test_whisper_enc_dec(test_device, variant): print(f"generated tokens: {tokenizer.decode(generated_tokens)}") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") diff --git a/forge/test/models/pytorch/audio/whisper/test_whisper_3.py b/forge/test/models/pytorch/audio/whisper/test_whisper_3.py index cad076d1e..c81df0313 100644 --- a/forge/test/models/pytorch/audio/whisper/test_whisper_3.py +++ b/forge/test/models/pytorch/audio/whisper/test_whisper_3.py @@ -30,7 +30,6 @@ def forward(self, decoder_input_ids, encoder_hidden_states): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail( reason='RuntimeError: TT_ASSERT @ /tt-forge-fe/forge/csrc/passes/commute_utils.cpp:1103: reshape->op_name() == "reshape"' ) diff --git a/forge/test/models/pytorch/multimodal/clip/test_clip.py b/forge/test/models/pytorch/multimodal/clip/test_clip.py index 16e52c65f..0982162b1 100644 --- a/forge/test/models/pytorch/multimodal/clip/test_clip.py +++ b/forge/test/models/pytorch/multimodal/clip/test_clip.py @@ -17,7 +17,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_clip_pytorch(test_device): # Set Forge configuration parameters diff --git a/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion.py b/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion.py index e83bc0625..6acd50a1a 100644 --- a/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion.py +++ b/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion.py @@ -16,6 +16,7 @@ ) +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="unsupported for now") @pytest.mark.nightly def test_stable_diffusion_pytorch(variant="CompVis/stable-diffusion-v1-4", batch_size=1): diff --git a/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py b/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py index 82ebbc8d7..638052cff 100644 --- a/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py +++ b/forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py @@ -28,7 +28,7 @@ def forward(self, input_tensor): @pytest.mark.nightly -@pytest.mark.model_analysis +@pytest.mark.skip_model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.xfail( reason="RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
Consider making it a parameter or input, or detaching the gradient" diff --git a/forge/test/models/pytorch/multimodal/vilt/test_vilt.py b/forge/test/models/pytorch/multimodal/vilt/test_vilt.py index 4d7b85cad..dfc95aced 100644 --- a/forge/test/models/pytorch/multimodal/vilt/test_vilt.py +++ b/forge/test/models/pytorch/multimodal/vilt/test_vilt.py @@ -51,7 +51,6 @@ def generate_model_vilt_question_answering_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_vilt_question_answering_hf_pytorch(variant, test_device): model, inputs, _ = generate_model_vilt_question_answering_hf_pytorch( @@ -95,7 +94,6 @@ def generate_model_vilt_maskedlm_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="pcc=0.9498278562793674") @pytest.mark.parametrize("variant", variants, ids=variants) def test_vilt_maskedlm_hf_pytorch(variant, test_device): diff --git a/forge/test/models/pytorch/text/albert/test_albert.py b/forge/test/models/pytorch/text/albert/test_albert.py index de27b5f1b..7e57ed115 100644 --- a/forge/test/models/pytorch/text/albert/test_albert.py +++ b/forge/test/models/pytorch/text/albert/test_albert.py @@ -13,7 +13,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.parametrize("size", sizes, ids=sizes) @@ -57,7 +56,6 @@ def test_albert_masked_lm_pytorch(size, variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.parametrize("size", sizes, ids=sizes) diff --git a/forge/test/models/pytorch/text/bart/test_bart.py b/forge/test/models/pytorch/text/bart/test_bart.py index 02ecad419..301a20374 100644 --- a/forge/test/models/pytorch/text/bart/test_bart.py +++ b/forge/test/models/pytorch/text/bart/test_bart.py @@ -23,7 +23,6 @@ def forward(self, input_ids, attention_mask, decoder_input_ids): @pytest.mark.nightly -@pytest.mark.model_analysis def test_pt_bart_classifier(test_device): compiler_cfg = _get_global_compiler_config() compiler_cfg.compile_depth = CompileDepth.SPLIT_GRAPH diff --git a/forge/test/models/pytorch/text/bert/test_bert.py b/forge/test/models/pytorch/text/bert/test_bert.py index c80f994c8..792b6fe4b 100644 --- a/forge/test/models/pytorch/text/bert/test_bert.py +++ b/forge/test/models/pytorch/text/bert/test_bert.py @@ -39,7 +39,6 @@ def generate_model_bert_maskedlm_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") def test_bert_masked_lm_pytorch(test_device): model, inputs, _ = generate_model_bert_maskedlm_hf_pytorch("bert-base-uncased") @@ -89,7 +88,6 @@ def generate_model_bert_qa_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") def test_bert_question_answering_pytorch(test_device): model, inputs, _ = generate_model_bert_qa_hf_pytorch("bert-large-cased-whole-word-masking-finetuned-squad") @@ -130,7 +128,6 @@ def generate_model_bert_seqcls_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def 
test_bert_sequence_classification_pytorch(test_device): model, inputs, _ = generate_model_bert_seqcls_hf_pytorch( "textattack/bert-base-uncased-SST-2", @@ -171,7 +168,6 @@ def generate_model_bert_tkcls_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") def test_bert_token_classification_pytorch(test_device): model, inputs, _ = generate_model_bert_tkcls_hf_pytorch("dbmdz/bert-large-cased-finetuned-conll03-english") diff --git a/forge/test/models/pytorch/text/codegen/test_codegen.py b/forge/test/models/pytorch/text/codegen/test_codegen.py index e513a5bc0..39c179b3a 100644 --- a/forge/test/models/pytorch/text/codegen/test_codegen.py +++ b/forge/test/models/pytorch/text/codegen/test_codegen.py @@ -20,7 +20,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="RuntimeError: Tensor 41 - data type mismatch: expected Float32, got BFloat16") @pytest.mark.parametrize("variant", variants, ids=variants) def test_codegen(test_device, variant): diff --git a/forge/test/models/pytorch/text/distilbert/test_distilbert.py b/forge/test/models/pytorch/text/distilbert/test_distilbert.py index 299ad2be2..80362774d 100644 --- a/forge/test/models/pytorch/text/distilbert/test_distilbert.py +++ b/forge/test/models/pytorch/text/distilbert/test_distilbert.py @@ -16,7 +16,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_distilbert_masked_lm_pytorch(variant, test_device): # Load DistilBert tokenizer and model from HuggingFace @@ -47,7 +46,6 @@ def test_distilbert_masked_lm_pytorch(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_distilbert_question_answering_pytorch(test_device): # Load Bert tokenizer and model from HuggingFace model_ckpt = "distilbert-base-cased-distilled-squad" @@ -84,7 +82,6 @@ def test_distilbert_question_answering_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_distilbert_sequence_classification_pytorch(test_device): # Load DistilBert tokenizer and model from HuggingFace @@ -112,7 +109,6 @@ def test_distilbert_sequence_classification_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_distilbert_token_classification_pytorch(test_device): # Load DistilBERT tokenizer and model from HuggingFace model_ckpt = "Davlan/distilbert-base-multilingual-cased-ner-hrl" diff --git a/forge/test/models/pytorch/text/dpr/test_dpr.py b/forge/test/models/pytorch/text/dpr/test_dpr.py index 34011a040..33e1b80f0 100644 --- a/forge/test/models/pytorch/text/dpr/test_dpr.py +++ b/forge/test/models/pytorch/text/dpr/test_dpr.py @@ -19,7 +19,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") @pytest.mark.parametrize("variant", variants, ids=variants) def test_dpr_context_encoder_pytorch(variant, test_device): @@ -61,7 +60,6 @@ def test_dpr_context_encoder_pytorch(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") @pytest.mark.parametrize("variant", variants, ids=variants) def test_dpr_question_encoder_pytorch(variant, test_device): @@ -102,7 +100,6 @@ def test_dpr_question_encoder_pytorch(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis 
@pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") @pytest.mark.parametrize("variant", variants, ids=variants) def test_dpr_reader_pytorch(variant, test_device): diff --git a/forge/test/models/pytorch/text/falcon/test_falcon.py b/forge/test/models/pytorch/text/falcon/test_falcon.py index 9a5703064..9493740fd 100644 --- a/forge/test/models/pytorch/text/falcon/test_falcon.py +++ b/forge/test/models/pytorch/text/falcon/test_falcon.py @@ -8,7 +8,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_falcon(test_device): compiler_cfg = forge.config._get_global_compiler_config() diff --git a/forge/test/models/pytorch/text/fuyu/test_fuyu_8b.py b/forge/test/models/pytorch/text/fuyu/test_fuyu_8b.py index 356bafe93..27e13cc82 100644 --- a/forge/test/models/pytorch/text/fuyu/test_fuyu_8b.py +++ b/forge/test/models/pytorch/text/fuyu/test_fuyu_8b.py @@ -30,7 +30,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_fuyu8b(test_device): # Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() @@ -76,6 +75,7 @@ def test_fuyu8b(test_device): os.remove("bus.png") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="not supported yet") def test_fuyu8b_past_cache(test_device): diff --git a/forge/test/models/pytorch/text/gemma/test_gemma_2b.py b/forge/test/models/pytorch/text/gemma/test_gemma_2b.py index 3b40abe6f..af33ad56a 100644 --- a/forge/test/models/pytorch/text/gemma/test_gemma_2b.py +++ b/forge/test/models/pytorch/text/gemma/test_gemma_2b.py @@ -49,6 +49,7 @@ def cpu_sanity_run_1(): ] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Tested as part of full model test run") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -90,6 +91,7 @@ def forward(self, x, pos_ids): compiled_model = forge.compile(pytorch_model, sample_inputs=inputs, module_name="pt_gemma_2b_rotary_embedding") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Tested as part of full model test run") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -129,6 +131,7 @@ def forward(self, x): compiled_model = forge.compile(pytorch_model, sample_inputs=inputs, module_name="pt_gemma_2b_rms_norm") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Tested as part of full model test run") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -171,6 +174,7 @@ def forward(self, hidden_states, attn_mask, pos_ids): compiled_model = forge.compile(pytorch_model, sample_inputs=inputs, module_name="pt_gemma_2b_attention") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Tested as part of full model test run") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -211,6 +215,7 @@ def forward(self, hidden_states): compiled_model = forge.compile(pytorch_model, sample_inputs=inputs, module_name="pt_gemma_2b_mlp") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Tested as part of full model test run") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -254,7 +259,6 @@ def forward(self, hidden_states, attn_mask, pos_ids): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_gemma_2b(test_device, variant): # Random see for reproducibility @@ -293,6 +297,7 @@ def test_gemma_2b(test_device, variant): compiled_model = forge.compile(pytorch_model, 
sample_inputs=inputs, module_name="pt_gemma_2b") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -375,6 +380,7 @@ def test_gemma_2b_gen(test_device, variant): print(f"{tt_ans}") +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") @pytest.mark.parametrize("variant", variants, ids=variants) diff --git a/forge/test/models/pytorch/text/gpt2/test_gpt2.py b/forge/test/models/pytorch/text/gpt2/test_gpt2.py index 56c906309..13c17da0d 100644 --- a/forge/test/models/pytorch/text/gpt2/test_gpt2.py +++ b/forge/test/models/pytorch/text/gpt2/test_gpt2.py @@ -11,7 +11,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="RuntimeError: Tensor 6 - data type mismatch: expected Float32, got BFloat16") def test_gpt2_text_gen(test_device): # Load tokenizer and model from HuggingFace @@ -63,6 +62,7 @@ def forward(self, input_ids, attention_mask, *kv): return self.model(input_ids, past_key_values, attention_mask) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="not supported yet") def test_gpt2_past_cache(test_device): diff --git a/forge/test/models/pytorch/text/gptneo/test_gptneo.py b/forge/test/models/pytorch/text/gptneo/test_gptneo.py index b6328f072..e47fbb2f1 100644 --- a/forge/test/models/pytorch/text/gptneo/test_gptneo.py +++ b/forge/test/models/pytorch/text/gptneo/test_gptneo.py @@ -23,7 +23,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_gptneo_causal_lm(variant, test_device): # Set random seed for repeatability diff --git a/forge/test/models/pytorch/text/llama/test_llama3.py b/forge/test/models/pytorch/text/llama/test_llama3.py index 97570c7f1..e19ec9ce0 100644 --- a/forge/test/models/pytorch/text/llama/test_llama3.py +++ b/forge/test/models/pytorch/text/llama/test_llama3.py @@ -117,7 +117,6 @@ def _update_causal_mask( @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_llama3_causal_lm(variant, test_device): # Configurations @@ -160,7 +159,6 @@ def test_llama3_causal_lm(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_llama3_sequence_classification(variant, test_device): diff --git a/forge/test/models/pytorch/text/mistral/test_mistral.py b/forge/test/models/pytorch/text/mistral/test_mistral.py index 29e7ce4bd..0715fc396 100644 --- a/forge/test/models/pytorch/text/mistral/test_mistral.py +++ b/forge/test/models/pytorch/text/mistral/test_mistral.py @@ -18,6 +18,7 @@ variants = ["mistralai/Mistral-7B-v0.1"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Tested as part of full model test run") @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.nightly @@ -43,7 +44,6 @@ def test_mistral_decoder_layer(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_mistral(variant, test_device): @@ -78,6 +78,7 @@ def test_mistral(variant, test_device): variants = ["mistralai/Mistral-7B-v0.1"] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="This test currently serves the same purpose as test_mistral") @@ -144,6 +145,7 @@ def test_mistral_decode(variant, 
test_device): variants = ["mistralai/Mistral-7B-v0.1"] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="under development") @pytest.mark.parametrize("variant", variants, ids=variants) diff --git a/forge/test/models/pytorch/text/opt/test_opt.py b/forge/test/models/pytorch/text/opt/test_opt.py index 934f9b843..5d2c3fd4b 100644 --- a/forge/test/models/pytorch/text/opt/test_opt.py +++ b/forge/test/models/pytorch/text/opt/test_opt.py @@ -10,7 +10,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_opt_causal_lm(variant, test_device): # Load tokenizer and model from HuggingFace @@ -47,7 +46,6 @@ def test_opt_causal_lm(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_opt_qa(variant, test_device): # Load tokenizer and model from HuggingFace @@ -83,7 +81,6 @@ def test_opt_qa(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_opt_sequence_classification(variant, test_device): # Set Forge configuration parameters diff --git a/forge/test/models/pytorch/text/phi2/test_phi2.py b/forge/test/models/pytorch/text/phi2/test_phi2.py index 960777e36..4da6a2430 100644 --- a/forge/test/models/pytorch/text/phi2/test_phi2.py +++ b/forge/test/models/pytorch/text/phi2/test_phi2.py @@ -17,7 +17,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.xfail(reason="weights.get_dtype() == DataType::BFLOAT16") def test_phi2_clm(variant, test_device): @@ -67,7 +66,6 @@ def test_phi2_clm(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") def test_phi2_token_classification(variant, test_device): @@ -108,7 +106,6 @@ def test_phi2_token_classification(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) @pytest.mark.xfail(reason="TT_FATAL(weights.get_dtype() == DataType::BFLOAT16) in embedding op") def test_phi2_sequence_classification(variant, test_device): diff --git a/forge/test/models/pytorch/text/phi3/test_phi3.py b/forge/test/models/pytorch/text/phi3/test_phi3.py index 6f4b631f3..b34b75f0d 100644 --- a/forge/test/models/pytorch/text/phi3/test_phi3.py +++ b/forge/test/models/pytorch/text/phi3/test_phi3.py @@ -17,7 +17,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_phi3_causal_lm(variant, test_device): @@ -62,7 +61,6 @@ def test_phi3_causal_lm(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="RuntimeError: Indices tensor must be in row major layout.") @pytest.mark.parametrize("variant", variants) def test_phi3_token_classification(variant, test_device): @@ -103,7 +101,6 @@ def test_phi3_token_classification(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="RuntimeError: Embedding Device Operation Layout Mismatch - Expected ROW_MAJOR") @pytest.mark.parametrize("variant", variants) def test_phi3_sequence_classification(variant, test_device): diff --git a/forge/test/models/pytorch/text/qwen/test_qwen.py b/forge/test/models/pytorch/text/qwen/test_qwen.py index 06584eeb2..1b1311066 
100644 --- a/forge/test/models/pytorch/text/qwen/test_qwen.py +++ b/forge/test/models/pytorch/text/qwen/test_qwen.py @@ -9,7 +9,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_qwen1_5_causal_lm(test_device): # Set PyBuda configurations @@ -59,7 +58,6 @@ def parse_chat_completion(text: str): @pytest.mark.nightly -@pytest.mark.model_analysis def test_qwen1_5_chat(test_device): # Set PyBuda configurations diff --git a/forge/test/models/pytorch/text/qwen/test_qwen_coder.py b/forge/test/models/pytorch/text/qwen/test_qwen_coder.py index d3d25cdba..341ab6262 100644 --- a/forge/test/models/pytorch/text/qwen/test_qwen_coder.py +++ b/forge/test/models/pytorch/text/qwen/test_qwen_coder.py @@ -18,7 +18,6 @@ @pytest.mark.parametrize("variant", variants, ids=variants) -@pytest.mark.model_analysis @pytest.mark.xfail( reason="RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph - repeat interleave" ) diff --git a/forge/test/models/pytorch/text/qwen/test_qwen_v2.py b/forge/test/models/pytorch/text/qwen/test_qwen_v2.py index 64aab3de0..f004e5422 100644 --- a/forge/test/models/pytorch/text/qwen/test_qwen_v2.py +++ b/forge/test/models/pytorch/text/qwen/test_qwen_v2.py @@ -20,7 +20,6 @@ @pytest.mark.parametrize("variant", variants, ids=variants) -@pytest.mark.model_analysis @pytest.mark.xfail( reason="RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph - repeat interleave" ) diff --git a/forge/test/models/pytorch/text/roberta/test_roberta.py b/forge/test/models/pytorch/text/roberta/test_roberta.py index 7c78f1af4..c156d8d3b 100644 --- a/forge/test/models/pytorch/text/roberta/test_roberta.py +++ b/forge/test/models/pytorch/text/roberta/test_roberta.py @@ -9,7 +9,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_roberta_masked_lm(test_device): # Load Albert tokenizer and model from HuggingFace tokenizer = download_model(AutoTokenizer.from_pretrained, "xlm-roberta-base") @@ -35,7 +34,6 @@ def test_roberta_masked_lm(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_roberta_sentiment_pytorch(test_device): # Load Bart tokenizer and model from HuggingFace tokenizer = download_model(AutoTokenizer.from_pretrained, "cardiffnlp/twitter-roberta-base-sentiment") diff --git a/forge/test/models/pytorch/text/squeezebert/test_squeezebert.py b/forge/test/models/pytorch/text/squeezebert/test_squeezebert.py index 1016f0c1a..ed32a331e 100644 --- a/forge/test/models/pytorch/text/squeezebert/test_squeezebert.py +++ b/forge/test/models/pytorch/text/squeezebert/test_squeezebert.py @@ -8,7 +8,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_squeezebert_sequence_classification_pytorch(test_device): # Load Bart tokenizer and model from HuggingFace tokenizer = download_model(AutoTokenizer.from_pretrained, "squeezebert/squeezebert-mnli") diff --git a/forge/test/models/pytorch/text/t5/test_t5.py b/forge/test/models/pytorch/text/t5/test_t5.py index 42439fc3b..e184be65b 100644 --- a/forge/test/models/pytorch/text/t5/test_t5.py +++ b/forge/test/models/pytorch/text/t5/test_t5.py @@ -13,6 +13,7 @@ from forge.verify.compare import compare_with_golden +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported") def test_t5_loop_tiny_tile(test_device): @@ -96,7 +97,6 @@ def forward(self, decoder_input_ids, encoder_outputs): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_t5_generation(variant, 
test_device): @@ -190,6 +190,7 @@ def forward( variants = ["t5-small", "t5-base", "t5-large", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="not supported yet") @pytest.mark.parametrize("variant", variants, ids=variants) @@ -375,6 +376,7 @@ def test_t5_past_cache_enc_dec(variant, test_device): variants = ["t5-small", "t5-base", "t5-large", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") @@ -710,6 +712,7 @@ def wrap_generate(inputs): variants = ["t5-small", "t5-base", "t5-large", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large"] +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.skip(reason="Redundant") @@ -759,6 +762,7 @@ def test_t5_forge_pipeline(variant, test_device): print(answer) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Redundant") def test_t5_small_tiny_tile(test_device): diff --git a/forge/test/models/pytorch/text/xglm/test_xglm.py b/forge/test/models/pytorch/text/xglm/test_xglm.py index 6c5410f0a..823029496 100644 --- a/forge/test/models/pytorch/text/xglm/test_xglm.py +++ b/forge/test/models/pytorch/text/xglm/test_xglm.py @@ -11,7 +11,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_xglm_causal_lm(variant, test_device): # Set Forge configuration parameters diff --git a/forge/test/models/pytorch/timeseries/nbeats/test_nbeats.py b/forge/test/models/pytorch/timeseries/nbeats/test_nbeats.py index 059245ed5..e58465ba9 100644 --- a/forge/test/models/pytorch/timeseries/nbeats/test_nbeats.py +++ b/forge/test/models/pytorch/timeseries/nbeats/test_nbeats.py @@ -17,7 +17,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="RuntimeError: Tensor 4 - stride mismatch: expected [24, 1], got [1, 12]") def test_nbeats_with_seasonality_basis(test_device): compiler_cfg = forge.config._get_global_compiler_config() @@ -45,7 +44,6 @@ def test_nbeats_with_seasonality_basis(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="Failing with pcc=0.83") def test_nbeats_with_generic_basis(test_device): compiler_cfg = forge.config._get_global_compiler_config() @@ -67,7 +65,6 @@ def test_nbeats_with_generic_basis(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="Failing with pcc=0.83") def test_nbeats_with_trend_basis(test_device): compiler_cfg = forge.config._get_global_compiler_config() diff --git a/forge/test/models/pytorch/vision/alexnet/test_alexnet.py b/forge/test/models/pytorch/vision/alexnet/test_alexnet.py index 9034c0aee..799db7c93 100644 --- a/forge/test/models/pytorch/vision/alexnet/test_alexnet.py +++ b/forge/test/models/pytorch/vision/alexnet/test_alexnet.py @@ -14,7 +14,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_alexnet_torchhub(test_device): # Configurations compiler_cfg = forge.config._get_global_compiler_config() @@ -48,7 +47,6 @@ def test_alexnet_torchhub(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_alexnet_osmr(test_device): # Configurations compiler_cfg = forge.config._get_global_compiler_config() diff --git 
a/forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py b/forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py index e4c488d78..377e80a39 100644 --- a/forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py +++ b/forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py @@ -13,7 +13,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_conv_ae_pytorch(test_device): # Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() @@ -41,7 +40,6 @@ def test_conv_ae_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_linear_ae_pytorch(test_device): # Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() diff --git a/forge/test/models/pytorch/vision/blazebase/test_blazepose.py b/forge/test/models/pytorch/vision/blazebase/test_blazepose.py index d521da8a9..d4c79f820 100644 --- a/forge/test/models/pytorch/vision/blazebase/test_blazepose.py +++ b/forge/test/models/pytorch/vision/blazebase/test_blazepose.py @@ -19,6 +19,7 @@ # from mediapipepytorch.visualization import POSE_CONNECTIONS, draw_landmarks +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_blazepose_detector_pytorch(test_device): @@ -41,6 +42,7 @@ def test_blazepose_detector_pytorch(test_device): compiled_model = forge.compile(pose_detector, sample_inputs=[img2], module_name="pt_blazepose_detector") +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_blazepose_regressor_pytorch(test_device): @@ -55,6 +57,7 @@ def test_blazepose_regressor_pytorch(test_device): compiled_model = forge.compile(pose_regressor, sample_inputs=img2, module_name="pt_blazepose_regressor") +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_blaze_palm_pytorch(test_device): @@ -79,6 +82,7 @@ def test_blaze_palm_pytorch(test_device): compiled_model = forge.compile(palm_detector, sample_inputs=[img2], module_name="pt_palm_detector") +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_blaze_hand_pytorch(test_device): diff --git a/forge/test/models/pytorch/vision/bts/test_bts.py b/forge/test/models/pytorch/vision/bts/test_bts.py index c459957e1..cb129281a 100644 --- a/forge/test/models/pytorch/vision/bts/test_bts.py +++ b/forge/test/models/pytorch/vision/bts/test_bts.py @@ -20,6 +20,7 @@ variants = ["densenet161_bts", "densenet121_bts"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.parametrize("variant", variants, ids=variants) @pytest.mark.nightly diff --git a/forge/test/models/pytorch/vision/ddrnet/test_ddrnet.py b/forge/test/models/pytorch/vision/ddrnet/test_ddrnet.py index 844aa5cbf..dfe04463c 100644 --- a/forge/test/models/pytorch/vision/ddrnet/test_ddrnet.py +++ b/forge/test/models/pytorch/vision/ddrnet/test_ddrnet.py @@ -18,6 +18,7 @@ variants = ["ddrnet23s", "ddrnet23", "ddrnet39"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly @@ -68,6 +69,7 @@ def test_ddrnet_pytorch(variant, test_device): variants = ["ddrnet23s_cityscapes", "ddrnet23_cityscapes"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git 
a/forge/test/models/pytorch/vision/deit/test_deit.py b/forge/test/models/pytorch/vision/deit/test_deit.py index 33c7f1cb4..bf651a99c 100644 --- a/forge/test/models/pytorch/vision/deit/test_deit.py +++ b/forge/test/models/pytorch/vision/deit/test_deit.py @@ -42,9 +42,8 @@ def generate_model_deit_imgcls_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) -def test_vit_base_classify_224_hf_pytorch(variant, test_device): +def test_deit_imgcls_hf_pytorch(variant, test_device): model, inputs, _ = generate_model_deit_imgcls_hf_pytorch( variant, ) diff --git a/forge/test/models/pytorch/vision/densenet/test_densenet.py b/forge/test/models/pytorch/vision/densenet/test_densenet.py index 5a5084c93..e088f0d0c 100644 --- a/forge/test/models/pytorch/vision/densenet/test_densenet.py +++ b/forge/test/models/pytorch/vision/densenet/test_densenet.py @@ -15,7 +15,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_densenet_121_pytorch(variant, test_device): @@ -43,7 +42,6 @@ def test_densenet_121_pytorch(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_densenet_161_pytorch(test_device): # STEP 1: Set Forge configuration parameters @@ -61,7 +59,6 @@ def test_densenet_161_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_densenet_169_pytorch(test_device): # STEP 1: Set Forge configuration parameters @@ -79,7 +76,6 @@ def test_densenet_169_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_densenet_201_pytorch(test_device): # STEP 1: Set Forge configuration parameters diff --git a/forge/test/models/pytorch/vision/detr/test_detr.py b/forge/test/models/pytorch/vision/detr/test_detr.py index 2bf5fc1c3..2485527b1 100644 --- a/forge/test/models/pytorch/vision/detr/test_detr.py +++ b/forge/test/models/pytorch/vision/detr/test_detr.py @@ -16,7 +16,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="AttributeError: has no attribute name_hint") @pytest.mark.parametrize("variant", ["facebook/detr-resnet-50"]) def test_detr_detection(variant): @@ -37,7 +36,6 @@ def test_detr_detection(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="AssertionError: TVM einsum decomposition does not support bqnc,bnchw->bqnhw yet.") @pytest.mark.parametrize("variant", ["facebook/detr-resnet-50-panoptic"]) def test_detr_segmentation(variant): diff --git a/forge/test/models/pytorch/vision/dla/test_dla.py b/forge/test/models/pytorch/vision/dla/test_dla.py index 18aa38a32..67a285142 100644 --- a/forge/test/models/pytorch/vision/dla/test_dla.py +++ b/forge/test/models/pytorch/vision/dla/test_dla.py @@ -37,7 +37,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_dla_pytorch(variant, test_device): diff --git a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet.py b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet.py index 96eaaae7a..4cf010489 100644 --- a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet.py +++ b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet.py @@ -45,7 +45,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_efficientnet_timm(variant, test_device): @@ -103,7 +102,6 @@ def get_state_dict(self, *args, **kwargs): @pytest.mark.nightly 
-@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) @pytest.mark.xfail(reason="Runtime Error: Reshape Operation Fails Due to Mismatched Tensor Volume") def test_efficientnet_torchvision(variant, test_device): diff --git a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py index 13fa1850b..3abcb8e2a 100644 --- a/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py +++ b/forge/test/models/pytorch/vision/efficientnet/test_efficientnet_lite.py @@ -11,6 +11,7 @@ from forge.verify.compare import compare_with_golden +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_efficientnet_lite_0_pytorch(test_device): @@ -36,6 +37,7 @@ def test_efficientnet_lite_0_pytorch(test_device): assert all([compare_with_golden(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out)]) +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_efficientnet_lite_1_pytorch(test_device): @@ -64,6 +66,7 @@ def test_efficientnet_lite_1_pytorch(test_device): assert all([compare_with_golden(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out)]) +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_efficientnet_lite_2_pytorch(test_device): @@ -91,6 +94,7 @@ def test_efficientnet_lite_2_pytorch(test_device): assert all([compare_with_golden(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out)]) +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_efficientnet_lite_3_pytorch(test_device): @@ -119,6 +123,7 @@ def test_efficientnet_lite_3_pytorch(test_device): assert all([compare_with_golden(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out)]) +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_efficientnet_lite_4_pytorch(test_device): diff --git a/forge/test/models/pytorch/vision/fchardnet/test_fchardnet.py b/forge/test/models/pytorch/vision/fchardnet/test_fchardnet.py index fbba2137e..23e74d2a2 100644 --- a/forge/test/models/pytorch/vision/fchardnet/test_fchardnet.py +++ b/forge/test/models/pytorch/vision/fchardnet/test_fchardnet.py @@ -14,6 +14,7 @@ # from fchardnet import get_model, fuse_bn_recursively +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_fchardnet(test_device): diff --git a/forge/test/models/pytorch/vision/fpn/test_fpn.py b/forge/test/models/pytorch/vision/fpn/test_fpn.py index 3a5c34d8b..8571a3fe5 100644 --- a/forge/test/models/pytorch/vision/fpn/test_fpn.py +++ b/forge/test/models/pytorch/vision/fpn/test_fpn.py @@ -8,7 +8,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_fpn_pytorch(test_device): compiler_cfg = forge.config._get_global_compiler_config() compiler_cfg.compile_depth = forge.CompileDepth.SPLIT_GRAPH diff --git a/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py b/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py index bda826cd3..1cef146b3 100644 --- a/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py +++ b/forge/test/models/pytorch/vision/ghostnet/test_ghostnet.py @@ -20,7 +20,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="Runtime error : Invalid arguments to 
reshape") @pytest.mark.parametrize("variant", variants, ids=variants) def test_ghostnet_timm(variant, test_device): diff --git a/forge/test/models/pytorch/vision/googlenet/test_googlenet.py b/forge/test/models/pytorch/vision/googlenet/test_googlenet.py index 58a5408b7..e9225d059 100644 --- a/forge/test/models/pytorch/vision/googlenet/test_googlenet.py +++ b/forge/test/models/pytorch/vision/googlenet/test_googlenet.py @@ -12,7 +12,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_googlenet_pytorch(test_device): # Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object diff --git a/forge/test/models/pytorch/vision/hardnet/test_hardnet.py b/forge/test/models/pytorch/vision/hardnet/test_hardnet.py index cf0f26913..86aa30a36 100644 --- a/forge/test/models/pytorch/vision/hardnet/test_hardnet.py +++ b/forge/test/models/pytorch/vision/hardnet/test_hardnet.py @@ -22,6 +22,7 @@ ] +@pytest.mark.skip_model_analysis @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly @pytest.mark.skip(reason="dependent on CCM repo") diff --git a/forge/test/models/pytorch/vision/hrnet/test_hrnet.py b/forge/test/models/pytorch/vision/hrnet/test_hrnet.py index 959c81da5..421702c81 100644 --- a/forge/test/models/pytorch/vision/hrnet/test_hrnet.py +++ b/forge/test/models/pytorch/vision/hrnet/test_hrnet.py @@ -83,7 +83,6 @@ def generate_model_hrnet_imgcls_osmr_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_hrnet_osmr_pytorch(test_device, variant): model, inputs, _ = generate_model_hrnet_imgcls_osmr_pytorch( @@ -149,7 +148,6 @@ def generate_model_hrnet_imgcls_timm_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_hrnet_timm_pytorch(test_device, variant): model, inputs, _ = generate_model_hrnet_imgcls_timm_pytorch( diff --git a/forge/test/models/pytorch/vision/inception/test_inception_v4.py b/forge/test/models/pytorch/vision/inception/test_inception_v4.py index 69927e39a..9be3df367 100644 --- a/forge/test/models/pytorch/vision/inception/test_inception_v4.py +++ b/forge/test/models/pytorch/vision/inception/test_inception_v4.py @@ -32,7 +32,6 @@ def generate_model_inceptionV4_imgcls_osmr_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_inception_v4_osmr_pytorch(test_device): model, inputs = generate_model_inceptionV4_imgcls_osmr_pytorch("inceptionv4") compiled_model = forge.compile(model, sample_inputs=inputs, module_name="pt_osmr_inception_v4") @@ -49,7 +48,6 @@ def generate_model_inceptionV4_imgcls_timm_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_inception_v4_timm_pytorch(test_device): model, inputs = generate_model_inceptionV4_imgcls_timm_pytorch("inception_v4") diff --git a/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py b/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py index b4f4d3c3c..d9dc5c418 100644 --- a/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py +++ b/forge/test/models/pytorch/vision/mlp_mixer/test_mlp_mixer.py @@ -31,7 +31,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", varaints, ids=varaints) def test_mlp_mixer_timm_pytorch(variant, test_device): diff --git a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py index 
f4bf20900..a11185482 100644 --- a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py +++ b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1.py @@ -30,7 +30,6 @@ def generate_model_mobilenetV1_base_custom_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="RuntimeError: Invalid arguments to reshape") def test_mobilenetv1_basic(test_device): model, inputs, _ = generate_model_mobilenetV1_base_custom_pytorch( @@ -69,7 +68,6 @@ def generate_model_mobilenetv1_imgcls_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv1_192(test_device): model, inputs, _ = generate_model_mobilenetv1_imgcls_hf_pytorch( test_device, @@ -98,7 +96,6 @@ def generate_model_mobilenetV1I224_imgcls_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv1_224(test_device): model, inputs, _ = generate_model_mobilenetV1I224_imgcls_hf_pytorch( test_device, diff --git a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1_ssd.py b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1_ssd.py index dc48cdee5..099a9a22c 100644 --- a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1_ssd.py +++ b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v1_ssd.py @@ -10,6 +10,7 @@ # from mobilenetv1_ssd.vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_mobilenet_v1_ssd_pytorch_1x1(test_device): diff --git a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v2.py b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v2.py index 5069af5d2..089746335 100644 --- a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v2.py +++ b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v2.py @@ -41,7 +41,6 @@ def generate_model_mobilenetV2_imgcls_torchhub_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv2_basic(test_device): model, inputs, _ = generate_model_mobilenetV2_imgcls_torchhub_pytorch( test_device, @@ -68,7 +67,6 @@ def generate_model_mobilenetV2I96_imgcls_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv2_96(test_device): model, inputs, _ = generate_model_mobilenetV2I96_imgcls_hf_pytorch( test_device, @@ -95,7 +93,6 @@ def generate_model_mobilenetV2I160_imgcls_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv2_160(test_device): model, inputs, _ = generate_model_mobilenetV2I160_imgcls_hf_pytorch( test_device, @@ -124,7 +121,6 @@ def generate_model_mobilenetV2I244_imgcls_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv2_224(test_device): model, inputs, _ = generate_model_mobilenetV2I244_imgcls_hf_pytorch( test_device, @@ -162,7 +158,6 @@ def generate_model_mobilenetV2_imgcls_timm_pytorch(test_device, variant): # @pytest.mark.xfail(reason="Runtime error : Invalid arguments to reshape") @pytest.mark.nightly -@pytest.mark.model_analysis def test_mobilenetv2_timm(test_device): model, inputs, _ = generate_model_mobilenetV2_imgcls_timm_pytorch( test_device, @@ -215,7 +210,6 @@ def generate_model_mobilenetV2_semseg_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_mobilenetv2_deeplabv3(variant, 
test_device): model, inputs, _ = generate_model_mobilenetV2_semseg_hf_pytorch( diff --git a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v3.py b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v3.py index ba54a85e4..f48cac71e 100644 --- a/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v3.py +++ b/forge/test/models/pytorch/vision/mobilenet/test_mobilenet_v3.py @@ -41,7 +41,6 @@ def generate_model_mobilenetV3_imgcls_torchhub_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="Runtime error : Invalid arguments to reshape") @pytest.mark.parametrize("variant", variants, ids=variants) def test_mobilenetv3_basic(variant, test_device): @@ -95,7 +94,6 @@ def generate_model_mobilenetV3_imgcls_timm_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail(reason="Runtime error : Invalid arguments to reshape") @pytest.mark.parametrize("variant", variants, ids=variants) def test_mobilenetv3_timm(variant, test_device): diff --git a/forge/test/models/pytorch/vision/monodle/test_monodle.py b/forge/test/models/pytorch/vision/monodle/test_monodle.py index 0de6b679e..2eec7dbae 100644 --- a/forge/test/models/pytorch/vision/monodle/test_monodle.py +++ b/forge/test/models/pytorch/vision/monodle/test_monodle.py @@ -11,7 +11,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_monodle_pytorch(test_device): # PyBuda configuration parameters compiler_cfg = forge.config._get_global_compiler_config() diff --git a/forge/test/models/pytorch/vision/openpose/test_openpose.py b/forge/test/models/pytorch/vision/openpose/test_openpose.py index 91af364ea..a47017cc9 100644 --- a/forge/test/models/pytorch/vision/openpose/test_openpose.py +++ b/forge/test/models/pytorch/vision/openpose/test_openpose.py @@ -51,6 +51,7 @@ def generate_model_openpose_posdet_custom_pytorch(test_device, variant): return framework_model, [img_tensor], {} +@pytest.mark.skip_model_analysis @pytest.mark.parametrize("variant", variants) @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly @@ -87,6 +88,7 @@ def generate_model_openpose_posdet_osmr_pytorch(test_device, variant): ] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/pytorch/vision/perceiverio/test_perceiverio.py b/forge/test/models/pytorch/vision/perceiverio/test_perceiverio.py index 115cd7420..83411f9d0 100644 --- a/forge/test/models/pytorch/vision/perceiverio/test_perceiverio.py +++ b/forge/test/models/pytorch/vision/perceiverio/test_perceiverio.py @@ -45,7 +45,6 @@ def get_sample_data(model_name): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_perceiverio_for_image_classification_pytorch(test_device, variant): diff --git a/forge/test/models/pytorch/vision/pidnet/test_pidnet.py b/forge/test/models/pytorch/vision/pidnet/test_pidnet.py index d7de23fb0..e92f147e0 100644 --- a/forge/test/models/pytorch/vision/pidnet/test_pidnet.py +++ b/forge/test/models/pytorch/vision/pidnet/test_pidnet.py @@ -18,6 +18,7 @@ variants = ["pidnet_s", "pidnet_m", "pidnet_l"] +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.parametrize("variant", variants) @pytest.mark.nightly diff --git a/forge/test/models/pytorch/vision/rcnn/test_rcnn.py b/forge/test/models/pytorch/vision/rcnn/test_rcnn.py index 9613a6061..c47154771 100644 
--- a/forge/test/models/pytorch/vision/rcnn/test_rcnn.py +++ b/forge/test/models/pytorch/vision/rcnn/test_rcnn.py @@ -17,7 +17,6 @@ # Paper - https://arxiv.org/abs/1311.2524 # Repo - https://github.com/object-detection-algorithm/R-CNN @pytest.mark.nightly -@pytest.mark.model_analysis def test_rcnn_pytorch(test_device): # Load Alexnet Model diff --git a/forge/test/models/pytorch/vision/regnet/test_regnet.py b/forge/test/models/pytorch/vision/regnet/test_regnet.py index 092b0bba2..781943270 100644 --- a/forge/test/models/pytorch/vision/regnet/test_regnet.py +++ b/forge/test/models/pytorch/vision/regnet/test_regnet.py @@ -9,7 +9,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail( reason="RuntimeError: TT_FATAL @ tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp:474: new_volume == old_volume. Tracking similar issue on ResNet tenstorrent/tt-mlir#1574 " ) @@ -32,7 +31,6 @@ def test_regnet(variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail( reason="RuntimeError: TT_FATAL @ tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp:474: new_volume == old_volume. Tracking similar issue on ResNet tenstorrent/tt-mlir#1574 " ) diff --git a/forge/test/models/pytorch/vision/resnet/test_resnet.py b/forge/test/models/pytorch/vision/resnet/test_resnet.py index a4b2cf68f..0d0cadf99 100644 --- a/forge/test/models/pytorch/vision/resnet/test_resnet.py +++ b/forge/test/models/pytorch/vision/resnet/test_resnet.py @@ -47,7 +47,6 @@ def generate_model_resnet_imgcls_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnet(test_device): model, inputs, _ = generate_model_resnet_imgcls_hf_pytorch( @@ -84,7 +83,6 @@ def generate_model_resnet_imgcls_timm_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnet_timm(test_device): model, inputs, _ = generate_model_resnet_imgcls_timm_pytorch( "resnet50", diff --git a/forge/test/models/pytorch/vision/resnext/test_resnext.py b/forge/test/models/pytorch/vision/resnext/test_resnext.py index 13b13093f..79d50f147 100644 --- a/forge/test/models/pytorch/vision/resnext/test_resnext.py +++ b/forge/test/models/pytorch/vision/resnext/test_resnext.py @@ -14,7 +14,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnext_50_torchhub_pytorch(test_device): # STEP 1: Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object @@ -33,7 +32,6 @@ def test_resnext_50_torchhub_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnext_101_torchhub_pytorch(test_device): # STEP 1: Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object @@ -52,7 +50,6 @@ def test_resnext_101_torchhub_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnext_101_32x8d_fb_wsl_pytorch(test_device): # STEP 1: Set Forge configuration parameters @@ -73,7 +70,6 @@ def test_resnext_101_32x8d_fb_wsl_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnext_14_osmr_pytorch(test_device): # STEP 1: Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object @@ -93,7 +89,6 @@ def test_resnext_14_osmr_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def 
test_resnext_26_osmr_pytorch(test_device): # STEP 1: Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object @@ -112,7 +107,6 @@ def test_resnext_26_osmr_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnext_50_osmr_pytorch(test_device): # STEP 1: Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object @@ -131,7 +125,6 @@ def test_resnext_50_osmr_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_resnext_101_osmr_pytorch(test_device): # STEP 1: Set Forge configuration parameters compiler_cfg = forge.config._get_global_compiler_config() # load global compiler config object diff --git a/forge/test/models/pytorch/vision/retinanet/test_retinanet.py b/forge/test/models/pytorch/vision/retinanet/test_retinanet.py index 18e74333d..6a5a803f6 100644 --- a/forge/test/models/pytorch/vision/retinanet/test_retinanet.py +++ b/forge/test/models/pytorch/vision/retinanet/test_retinanet.py @@ -23,7 +23,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_retinanet(variant, test_device): diff --git a/forge/test/models/pytorch/vision/segformer/test_segformer.py b/forge/test/models/pytorch/vision/segformer/test_segformer.py index 45c2982d5..1c397d057 100644 --- a/forge/test/models/pytorch/vision/segformer/test_segformer.py +++ b/forge/test/models/pytorch/vision/segformer/test_segformer.py @@ -22,7 +22,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants_img_classification) def test_segformer_image_classification_pytorch(test_device, variant): @@ -58,7 +57,6 @@ def test_segformer_image_classification_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants_semseg) def test_segformer_semantic_segmentation_pytorch(test_device, variant): diff --git a/forge/test/models/pytorch/vision/ssd300_resnet50/test_ssd300_resnet50.py b/forge/test/models/pytorch/vision/ssd300_resnet50/test_ssd300_resnet50.py index 179f617b8..5782f24c1 100644 --- a/forge/test/models/pytorch/vision/ssd300_resnet50/test_ssd300_resnet50.py +++ b/forge/test/models/pytorch/vision/ssd300_resnet50/test_ssd300_resnet50.py @@ -11,7 +11,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis def test_pytorch_ssd300_resnet50(test_device): # STEP 1 : Set Forge configuration parameters diff --git a/forge/test/models/pytorch/vision/swin/test_swin.py b/forge/test/models/pytorch/vision/swin/test_swin.py index e50d3afa8..c6818c8a8 100644 --- a/forge/test/models/pytorch/vision/swin/test_swin.py +++ b/forge/test/models/pytorch/vision/swin/test_swin.py @@ -13,7 +13,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.xfail( reason='RuntimeError: TT_ASSERT @ forge/csrc/passes/commute_utils.cpp:1105: reshape->op_name() == "reshape"' ) @@ -38,7 +37,7 @@ def test_swin_v1_tiny_4_224_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis +@pytest.mark.skip_model_analysis @pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen") @pytest.mark.parametrize("variant", ["microsoft/swinv2-tiny-patch4-window8-256"]) def test_swin_v2_tiny_4_256_hf_pytorch(variant): @@ -56,7 +55,7 @@ def test_swin_v2_tiny_4_256_hf_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis +@pytest.mark.skip_model_analysis 
@pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen") @pytest.mark.parametrize("variant", ["microsoft/swinv2-tiny-patch4-window8-256"]) def test_swin_v2_tiny_image_classification(variant): @@ -74,7 +73,7 @@ def test_swin_v2_tiny_image_classification(variant): @pytest.mark.nightly -@pytest.mark.model_analysis +@pytest.mark.skip_model_analysis @pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen") @pytest.mark.parametrize("variant", ["microsoft/swinv2-tiny-patch4-window8-256"]) def test_swin_v2_tiny_masked(variant): diff --git a/forge/test/models/pytorch/vision/tri/test_tri_basic_2.py b/forge/test/models/pytorch/vision/tri/test_tri_basic_2.py index 8e9ba514c..08fc296a2 100644 --- a/forge/test/models/pytorch/vision/tri/test_tri_basic_2.py +++ b/forge/test/models/pytorch/vision/tri/test_tri_basic_2.py @@ -14,6 +14,7 @@ # from semseg_tri import resnet34_semseg +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo and Hang observed at post_initial_graph_pass") @pytest.mark.nightly def test_tri_basic_2_sematic_segmentation_pytorch(test_device): diff --git a/forge/test/models/pytorch/vision/unet/test_unet.py b/forge/test/models/pytorch/vision/unet/test_unet.py index 27a999670..562643f05 100644 --- a/forge/test/models/pytorch/vision/unet/test_unet.py +++ b/forge/test/models/pytorch/vision/unet/test_unet.py @@ -31,7 +31,6 @@ def generate_model_unet_imgseg_osmr_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_unet_osmr_cityscape_pytorch(test_device): model, inputs, _ = generate_model_unet_imgseg_osmr_pytorch( "unet_cityscapes", @@ -66,6 +65,7 @@ def get_imagenet_sample(): return img_tensor +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="Model script not found") @pytest.mark.nightly def test_unet_holocron_pytorch(test_device): @@ -113,7 +113,6 @@ def generate_model_unet_imgseg_smp_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_unet_qubvel_pytorch(test_device): model, inputs, _ = generate_model_unet_imgseg_smp_pytorch( None, @@ -161,7 +160,6 @@ def generate_model_unet_imgseg_torchhub_pytorch(variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_unet_torchhub_pytorch(test_device): model, inputs, _ = generate_model_unet_imgseg_torchhub_pytorch( "unet", diff --git a/forge/test/models/pytorch/vision/vgg/test_vgg.py b/forge/test/models/pytorch/vision/vgg/test_vgg.py index 730ffd008..72e27c0e6 100644 --- a/forge/test/models/pytorch/vision/vgg/test_vgg.py +++ b/forge/test/models/pytorch/vision/vgg/test_vgg.py @@ -24,7 +24,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_vgg_osmr_pytorch(variant, test_device): # STEP 1: Set Forge configuration parameters @@ -58,7 +57,6 @@ def test_vgg_osmr_pytorch(variant, test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_vgg_19_hf_pytorch(test_device): # STEP 1: Set Forge configuration parameters @@ -118,7 +116,6 @@ def preprocess_timm_model(model_name): @pytest.mark.nightly -@pytest.mark.model_analysis def test_vgg_bn19_timm_pytorch(test_device): torch.multiprocessing.set_sharing_strategy("file_system") model_name = "vgg19_bn" @@ -132,7 +129,6 @@ def test_vgg_bn19_timm_pytorch(test_device): @pytest.mark.nightly -@pytest.mark.model_analysis def test_vgg_bn19_torchhub_pytorch(test_device): # STEP 1: Set Forge configuration parameters diff --git 
a/forge/test/models/pytorch/vision/vit/test_vit.py b/forge/test/models/pytorch/vision/vit/test_vit.py index 8360774f1..920ba1fb2 100644 --- a/forge/test/models/pytorch/vision/vit/test_vit.py +++ b/forge/test/models/pytorch/vision/vit/test_vit.py @@ -35,7 +35,6 @@ def generate_model_vit_imgcls_hf_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_vit_classify_224_hf_pytorch(variant, test_device): model, inputs, _ = generate_model_vit_imgcls_hf_pytorch( diff --git a/forge/test/models/pytorch/vision/vovnet/test_vovnet.py b/forge/test/models/pytorch/vision/vovnet/test_vovnet.py index bed471652..8be0eebaf 100644 --- a/forge/test/models/pytorch/vision/vovnet/test_vovnet.py +++ b/forge/test/models/pytorch/vision/vovnet/test_vovnet.py @@ -28,7 +28,6 @@ def generate_model_vovnet_imgcls_osmr_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", varaints, ids=varaints) def test_vovnet_osmr_pytorch(variant, test_device): model, inputs, _ = generate_model_vovnet_imgcls_osmr_pytorch( @@ -49,7 +48,6 @@ def generate_model_vovnet39_imgcls_stigma_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("enable_default_dram_parameters", [True, False]) def test_vovnet_v1_39_stigma_pytorch(test_device, enable_default_dram_parameters): model, inputs, _ = generate_model_vovnet39_imgcls_stigma_pytorch( @@ -72,7 +70,6 @@ def generate_model_vovnet57_imgcls_stigma_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis def test_vovnet_v1_57_stigma_pytorch(test_device): model, inputs, _ = generate_model_vovnet57_imgcls_stigma_pytorch( test_device, @@ -94,7 +91,6 @@ def generate_model_vovnet_imgcls_timm_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_vovnet_timm_pytorch(variant, test_device): model, inputs, _ = generate_model_vovnet_imgcls_timm_pytorch( diff --git a/forge/test/models/pytorch/vision/wideresnet/test_wideresnet.py b/forge/test/models/pytorch/vision/wideresnet/test_wideresnet.py index f6815effa..9b0410fd3 100644 --- a/forge/test/models/pytorch/vision/wideresnet/test_wideresnet.py +++ b/forge/test/models/pytorch/vision/wideresnet/test_wideresnet.py @@ -46,7 +46,6 @@ def generate_model_wideresnet_imgcls_pytorch(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_wideresnet_pytorch(variant, test_device): (model, inputs,) = generate_model_wideresnet_imgcls_pytorch( @@ -83,7 +82,6 @@ def generate_model_wideresnet_imgcls_timm(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_wideresnet_timm(variant, test_device): (model, inputs,) = generate_model_wideresnet_imgcls_timm( diff --git a/forge/test/models/pytorch/vision/xception/test_xception.py b/forge/test/models/pytorch/vision/xception/test_xception.py index 62408acee..8a382d1e6 100644 --- a/forge/test/models/pytorch/vision/xception/test_xception.py +++ b/forge/test/models/pytorch/vision/xception/test_xception.py @@ -39,7 +39,6 @@ def generate_model_xception_imgcls_timm(test_device, variant): @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants, ids=variants) def test_xception_timm(variant, test_device): diff 
--git a/forge/test/models/pytorch/vision/yolo/test_yolo_v3.py b/forge/test/models/pytorch/vision/yolo/test_yolo_v3.py index cb568b61c..da53773cf 100644 --- a/forge/test/models/pytorch/vision/yolo/test_yolo_v3.py +++ b/forge/test/models/pytorch/vision/yolo/test_yolo_v3.py @@ -35,6 +35,7 @@ def generate_model_yolotinyV3_imgcls_holli_pytorch(test_device, variant): return model, [img_tensor], {} +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_yolov3_tiny_holli_pytorch(test_device): @@ -67,6 +68,7 @@ def generate_model_yoloV3_imgcls_holli_pytorch(test_device, variant): return model, [img_tensor], {"pcc": pcc} +@pytest.mark.skip_model_analysis @pytest.mark.skip(reason="dependent on CCM repo") @pytest.mark.nightly def test_yolov3_holli_pytorch(test_device): diff --git a/forge/test/models/pytorch/vision/yolo/test_yolo_v5.py b/forge/test/models/pytorch/vision/yolo/test_yolo_v5.py index a7ac40320..93fa860fe 100644 --- a/forge/test/models/pytorch/vision/yolo/test_yolo_v5.py +++ b/forge/test/models/pytorch/vision/yolo/test_yolo_v5.py @@ -25,7 +25,6 @@ def generate_model_yoloV5I320_imgcls_torchhub_pytorch(test_device, variant, size @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size]) def test_yolov5_320x320(test_device, size): model, inputs, _ = generate_model_yoloV5I320_imgcls_torchhub_pytorch( @@ -55,7 +54,6 @@ def generate_model_yoloV5I640_imgcls_torchhub_pytorch(test_device, variant, size @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size]) def test_yolov5_640x640(test_device, size): @@ -81,7 +79,6 @@ def generate_model_yoloV5I480_imgcls_torchhub_pytorch(test_device, variant, size @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size]) def test_yolov5_480x480(test_device, size): @@ -96,7 +93,6 @@ def test_yolov5_480x480(test_device, size): @pytest.mark.nightly -@pytest.mark.model_analysis def test_yolov5_1280x1280(test_device): compiler_cfg = forge.config._get_global_compiler_config() diff --git a/forge/test/models/pytorch/vision/yolo/test_yolo_v6.py b/forge/test/models/pytorch/vision/yolo/test_yolo_v6.py index 9cd47f45a..9a1ddb82d 100644 --- a/forge/test/models/pytorch/vision/yolo/test_yolo_v6.py +++ b/forge/test/models/pytorch/vision/yolo/test_yolo_v6.py @@ -14,7 +14,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_yolo_v6_pytorch(variant, test_device): diff --git a/forge/test/models/pytorch/vision/yolo/test_yolox.py b/forge/test/models/pytorch/vision/yolo/test_yolox.py index 90064150b..7c29fa06c 100644 --- a/forge/test/models/pytorch/vision/yolo/test_yolox.py +++ b/forge/test/models/pytorch/vision/yolo/test_yolox.py @@ -34,7 +34,6 @@ @pytest.mark.nightly -@pytest.mark.model_analysis @pytest.mark.parametrize("variant", variants) def test_yolox_pytorch(variant, test_device): diff --git a/forge/test/models/tflite/vision/efficientnet_lite/test_efficientnet_lite.py b/forge/test/models/tflite/vision/efficientnet_lite/test_efficientnet_lite.py index b3f8d1f7a..eed269a27 100644 --- a/forge/test/models/tflite/vision/efficientnet_lite/test_efficientnet_lite.py +++ b/forge/test/models/tflite/vision/efficientnet_lite/test_efficientnet_lite.py @@ -7,6 +7,7 @@ import forge +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="dependent on CCM repo") def 
test_efficientnet_lite0_1x1(test_device): @@ -16,6 +17,7 @@ def test_efficientnet_lite0_1x1(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="dependent on CCM repo") def test_efficientnet_lite4_1x1(test_device): @@ -25,6 +27,7 @@ def test_efficientnet_lite4_1x1(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="dependent on CCM repo") def test_efficientnet_lite0(test_device): @@ -34,6 +37,7 @@ def test_efficientnet_lite0(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_efficientnet_lite1(test_device): @@ -43,6 +47,7 @@ def test_efficientnet_lite1(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_efficientnet_lite2(test_device): @@ -52,6 +57,7 @@ def test_efficientnet_lite2(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_efficientnet_lite3(test_device): @@ -61,6 +67,7 @@ def test_efficientnet_lite3(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_efficientnet_lite4(test_device): diff --git a/forge/test/models/tflite/vision/hand_landmark/test_hand_landmarker.py b/forge/test/models/tflite/vision/hand_landmark/test_hand_landmarker.py index 2c9d0b1a4..338746dcd 100644 --- a/forge/test/models/tflite/vision/hand_landmark/test_hand_landmarker.py +++ b/forge/test/models/tflite/vision/hand_landmark/test_hand_landmarker.py @@ -7,6 +7,7 @@ import forge +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_hand_landmark_lite_1x1(test_device): @@ -15,6 +16,7 @@ def test_hand_landmark_lite_1x1(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_palm_detection_lite_1x1(test_device): diff --git a/forge/test/models/tflite/vision/mobilenet_ssd/test_mobilenet_ssd.py b/forge/test/models/tflite/vision/mobilenet_ssd/test_mobilenet_ssd.py index a5f96357e..7023dea0b 100644 --- a/forge/test/models/tflite/vision/mobilenet_ssd/test_mobilenet_ssd.py +++ b/forge/test/models/tflite/vision/mobilenet_ssd/test_mobilenet_ssd.py @@ -7,6 +7,7 @@ import forge +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_mobilenet_ssd_1x1(test_device): diff --git a/forge/test/models/tflite/vision/pose_landmark/test_pose_landmark.py b/forge/test/models/tflite/vision/pose_landmark/test_pose_landmark.py index 3fb9acdbf..6915d0b31 100644 --- a/forge/test/models/tflite/vision/pose_landmark/test_pose_landmark.py +++ b/forge/test/models/tflite/vision/pose_landmark/test_pose_landmark.py @@ -7,6 +7,7 @@ import forge +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_pose_landmark_lite_1x1(test_device): @@ -17,6 +18,7 @@ def 
test_pose_landmark_lite_1x1(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_pose_landmark_heavy_1x1(test_device): @@ -27,6 +29,7 @@ def test_pose_landmark_heavy_1x1(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_pose_landmark_lite(test_device): @@ -36,6 +39,7 @@ def test_pose_landmark_lite(test_device): compiled_model = forge.compile(tflite_path, sample_inputs=sample_tensor) +@pytest.mark.skip_model_analysis @pytest.mark.nightly @pytest.mark.skip(reason="Not supported yet") def test_pose_landmark_heavy(test_device): diff --git a/forge/test/models_ops/__init__.py b/forge/test/models_ops/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pytest.ini b/pytest.ini index f307b91d4..01d3529e3 100644 --- a/pytest.ini +++ b/pytest.ini @@ -14,7 +14,7 @@ markers = nightly_sweeps: marks tests as nightly_sweeps slow: marks tests as slow # deprecated - slow tests, should not be run in push pipeline run_in_pp: marks tests as run_in_pp # deprecated - tests that should run in push pipeline - model_analysis: marks tests as model_analysis + skip_model_analysis: marks tests as skip_model_analysis # Where pytest should look for tests testpaths = @@ -52,5 +52,8 @@ testpaths = # Sweeps forge/test/operators/pytorch + # Models Ops test generated by extracting the unique ops configuration across all the models inside forge/test/models path + forge/test/models_ops + filterwarnings = ignore::DeprecationWarning diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/model_analysis.py b/scripts/model_analysis.py deleted file mode 100644 index f5ef8bc76..000000000 --- a/scripts/model_analysis.py +++ /dev/null @@ -1,1233 +0,0 @@ -# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC -# -# SPDX-License-Identifier: Apache-2.0 -import subprocess -import os -import time -import json -from loguru import logger -import math -import argparse -import pandas as pd -from tabulate import tabulate -from enum import IntEnum, Enum -from typing import Union, Dict, List, Tuple -from dataclasses import dataclass, asdict -import inspect -import ast - -import torch - -from forge.tvm_unique_op_generation import Operation, NodeType, UniqueOperations - - -class CompilerComponent(IntEnum): - FORGE = 0 - MLIR = 1 - TT_METAL = 2 - UNKNOWN = 4 - - -class MatchingExceptionRule: - """ - Represents a rule for matching exceptions based on specific tokens. - - Attributes: - rule_name (str): Name of the rule. - rule_tokens (List[str]): List of tokens to match in an exception message. - """ - - def __init__(self, rule_name: str, rule_tokens: List[str]): - self.rule_name = rule_name - self.rule_tokens = rule_tokens - - def match_rule(self, exception: str): - """ - Matches the rule tokens against the given exception string. - - Args: - exception (str): Exception message to match against. 
- """ - # Check if all tokens in rule_tokens exist in the exception message and - # return the rule_token if matches otherwise return None - matched_token = all([True if token in exception else False for token in self.rule_tokens]) - if matched_token: - return " ".join(self.rule_tokens) - else: - return None - - -class MatchingCompilerComponentException: - """ - Represents exception matching for a specific compiler component. - - Attributes: - compiler_component (CompilerComponent): Compiler component associated with this exception matching. - exception_rules (List[MatchingExceptionRule]): List of exception rules for this component. - """ - - def __init__(self, compiler_component: CompilerComponent, exception_rules: List[MatchingExceptionRule]): - self.compiler_component = compiler_component - self.exception_rules = exception_rules - - def match_rule(self, exception: str): - """ - Matches the given exception against the exception rules of this compiler component. - Args: - exception (str): Exception message to be checked against the rules. - """ - # Iterate over all exception rules for this compiler component. - for rule in self.exception_rules: - # Attempt to match the current rule against the exception and If a match is found, - # return the compiler component and the constructed error message. - if rule.match_rule(exception) is not None: - match_err_msg = ( - f"[{self.compiler_component.name}] " - if rule.rule_name is None or rule.rule_name == "" - else f"[{self.compiler_component.name}][{rule.rule_name}] " - ) - match_err_msg += rule.match_rule(exception) - return self.compiler_component, match_err_msg - - return None, None - - -common_failure_matching_rules_list = [ - MatchingCompilerComponentException( - CompilerComponent.FORGE, - [ - MatchingExceptionRule( - "forge_module evaluation", ["AssertionError", "Setting a tensor value of incorrect shape"] - ), - MatchingExceptionRule( - "embedding indicies tensor", - ["IndexError", "forge/forge/op/eval/forge/embedding.py", "index out of range in self"], - ), - MatchingExceptionRule( - "post_initial_graph_passes", - [ - "RuntimeError", - "has_newstyle_interface(std::get(type), false)", - "decomposing a type with old OpType interface, expects new OpType interface", - ], - ), - MatchingExceptionRule( - "lower_to_mlir", - ["RuntimeError", "Found Unsupported operations while lowering from TTForge to TTIR in forward graph"], - ), - MatchingExceptionRule( - "lower_to_mlir", - ["RuntimeError", "Unsupported data format during lowering from TTForge to TTIR"], - ), - MatchingExceptionRule( - "mlir generation failure", ["RuntimeError", "Generated MLIR module failed verification"] - ), - MatchingExceptionRule( - "Convert tt-forge attribute to an MLIR attribute", ["RuntimeError", "Unhandled attribute type"] - ), - MatchingExceptionRule("Runtime Datatype Unsupported", ["RuntimeError", "Unhandled dtype Bool"]), - # Compiled model Runtime - MatchingExceptionRule( - "Runtime Datatype mismatch", ["RuntimeError", "Tensor", "data type mismatch: expected", "got"] - ), - MatchingExceptionRule( - "Runtime Shape mismatch", ["RuntimeError", "Tensor", "shape mismatch: expected", "got"] - ), - MatchingExceptionRule( - "Runtime stride mismatch", ["RuntimeError", "Tensor", "stride mismatch: expected", "got"] - ), - MatchingExceptionRule( - "Runtime Input count mismatch", ["RuntimeError", "Input count mismatch: expected", "got"] - ), - MatchingExceptionRule( - "post_const_eval_tensors", ["RuntimeError", "unsupported memory format option Contiguous"] - ), - ], - ), - 
MatchingCompilerComponentException( - CompilerComponent.MLIR, - [ - MatchingExceptionRule( - "TTIR to TTNN Conv2dOpConversionPattern", - [ - "tt_forge_signal_handler", - "tt-mlir/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp", - "Conv2dOpConversionPattern::matchAndRewrite(ttir::Conv2dOp, OpAdaptor, ConversionPatternRewriter &)", - "adaptor.getPaddingBottom() == adaptor.getPaddingTop()", - "TTNN only supports padding height/width attributes. Thus, padding_top", - "must equal padding_bottom for the op to execute as expected", - ], - ), - MatchingExceptionRule( - "ttnn.reshape mlir pipeline", - [ - "RuntimeError", - "'ttnn.reshape' op Shape attribute size must match output tensor rank", - "Failed to run MLIR compiler pass pipeline", - ], - ), - MatchingExceptionRule( - "ttnn.maxpool2d mlir pipeline", - [ - "RuntimeError", - "ttnn.max_pool2d currently only supports an input type of bfloat16", - "Failed to run MLIR compiler pass pipeline", - ], - ), - MatchingExceptionRule("mlir pipeline", ["RuntimeError", "Failed to run MLIR compiler pass pipeline"]), - MatchingExceptionRule( - "MLIR runtime ttnn ", ["tt::exception", "tt-mlir/runtime/lib/ttnn/runtime.cpp", "Unsupported data type"] - ), - ], - ), - MatchingCompilerComponentException( - CompilerComponent.TT_METAL, - [ - MatchingExceptionRule( - "TT-Metal vs Forge Output Data mismatch", - [ - "ValueError", - "Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model", - ", compiled_model", - ], - ), - MatchingExceptionRule( - "ttnn.tilize validation", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/data_movement/tilize/device/tilize_op.cpp", - "input_tensor_a.get_dtype() == DataType::BFLOAT16", - ], - ), - MatchingExceptionRule( - "ttnn.tilize_with_val_padding validation", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/device/tilize_with_val_padding_op.cpp", - "input_tensor_a.get_dtype() == DataType::BFLOAT16 or input_tensor_a.get_dtype() == DataType::UINT32", - "Can only tilize bfloat16 or uint32 tensors", - ], - ), - MatchingExceptionRule( - "ttnn.embedding validation", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp", - "weights.get_dtype() == DataType::BFLOAT16", - ], - ), - MatchingExceptionRule( - "ttnn.embedding validation", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp", - "a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16", - "Input must be UINT32 or BFLOAT16", - ], - ), - MatchingExceptionRule( - "ttnn elementwise binary", ["RuntimeError", "BinaryOpType cannot be mapped to BcastOpMath"] - ), - MatchingExceptionRule( - "ttnn elementwise binary", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_device_operation.cpp", - "ttnn::operations::binary::BinaryDeviceOperation: unsupported broadcast", - ], - ), - MatchingExceptionRule( - "ttnn.concat validation", - ["RuntimeError", "Tile padding along concatenated dim", "not supported for concat yet"], - ), - MatchingExceptionRule( - "ttnn.reshape validation", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/device/reshape_op.cpp", - "input_tensor_a.get_dtype() == DataType::BFLOAT16", - ], - ), - MatchingExceptionRule( - "ttnn.matmul", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp", - "Mt % per_core_M == 0", - ], - ), - 
MatchingExceptionRule( - "ttnn.matmul", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp", - "Nt % per_core_N == 0", - ], - ), - MatchingExceptionRule( - "ttnn.reshape", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp", - "new_volume == old_volume", - "Invalid arguments to reshape", - ], - ), - MatchingExceptionRule( - "ttnn.reshape", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/data_movement/reshape_view/reshape.cpp", - "tensor_shape.rank() <= 4", - "Only up to 4D tensors", - ], - ), - MatchingExceptionRule( - "ttnn permute", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/data_movement/permute/device/permute_device_operation.cpp", - "attributes.dims.back() == tensor_args.input_tensor.get_logical_shape().rank() - 1", - "Last dimension of permute must be the last dimension of the input tensor as page-breaking is not supported at the moment", - ], - ), - MatchingExceptionRule( - "ttnn.pad", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/data_movement/pad/pad.cpp", - "Tensor rank is not 4", - ], - ), - MatchingExceptionRule( - "TTNN tensor types", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/tensor/types.cpp", - "normalized_index >= 0 and normalized_index < rank", - "Index is out of bounds for the rank", - ], - ), - MatchingExceptionRule( - "TTNN tensor types", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/tensor/types.cpp", - "shape[cur_idx] == 1", - "Can't convert shape rank", - ], - ), - MatchingExceptionRule("ttmetal allocations", ["RuntimeError", "Statically allocated circular buffers"]), - MatchingExceptionRule( - "ttmetal allocations", - [ - "RuntimeError", - "tt-metal/tt_metal/impl/allocator/allocator.cpp", - "Out of Memory: Not enough space to allocate", - ], - ), - MatchingExceptionRule( - "ttnn core", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/core/work_split/work_split_tilize.hpp", - "logical_shape.rank() >= 2 && logical_shape.rank() <= 4", - "Only 2D, 3D, and 4D tensors are supported", - ], - ), - MatchingExceptionRule( - "ttnn softmax", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp", - "input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B", - "Inputs must be of bfloat16 or bfloat8_b type", - ], - ), - MatchingExceptionRule( - "ttnn unsqueeze_to_4D", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/core/core.cpp", - "Tensor rank is greater than 4", - ], - ), - MatchingExceptionRule( - "ttnn matmul", - [ - "RuntimeError", - "tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op.cpp", - "(input_tensor_a.get_legacy_shape()[-1] / in0_tile_shape[1]) % program_config.in0_block_w == 0", - "Kt must be divisible by in0_block_w", - ], - ), - MatchingExceptionRule( - "tt-metal ncrisc build", - [ - "RuntimeError", - "tt-metal/tt_metal/impl/program/program.cpp", - "Failed to generate binaries for reader_conv_activations_padded_with_halo_3x3_weights_v2", - "ncrisc build failed", - ], - ), - ], - ), -] - - -class UniqueOpTestInfo: - """ - Represents information about a unique operation test, that includes op name, operands - arguments, and the status of various compiler components. - - Attributes: - Op (str): The name of the operation. - Operands (List[str]): List of operands associated with the operation. 
- Args (List[str]): List of Operation Arguments if any - components (dict): A dictionary indicating the support status for each compiler component. - failure_reason (str): The reason for failure, if any, during testing. - """ - - def __init__( - self, - Op: str, - Operands: List[str], - Args: List[str], - ): - self.Op = str(Op) - self.Operands = Operands - self.Args = Args - self.components = {} - for compiler_component in CompilerComponent: - self.components[str(compiler_component.name)] = False - self.failure_reason = "" - - @classmethod - def create(cls, op_name, operand_names, operand_types, operand_shapes, operand_dtypes, args): - - operands = UniqueOpTestInfo.create_operands(operand_names, operand_types, operand_shapes, operand_dtypes) - - args = UniqueOpTestInfo.create_args(args) - - return cls(Op=op_name, Operands=operands, Args=args) - - @classmethod - def create_operands(cls, operand_names, operand_types, operand_shapes, operand_dtypes): - operands = [] - for operand_name, operand_type, operand_shape, operand_dtype in zip( - operand_names, operand_types, operand_shapes, operand_dtypes - ): - if isinstance(operand_shape, torch.Tensor): - operands.append(f"Operand(type={operand_type}, name={operand_name}, dtype={operand_dtype})") - else: - operands.append(f"Operand(type={operand_type}, shape={operand_shape}, dtype={operand_dtype})") - return operands - - @classmethod - def create_args(cls, args): - arg_info = [] - if not args.is_empty(): - for arg_name, arg_value in args.items(): - arg_info.append(f"{arg_name} : {arg_value}") - return arg_info - - def update_compiler_components(self, error_message: str = ""): - if error_message: - updated_compiler_component_status = False - # Iterate over all failure matching rules to find a match. - for rule in common_failure_matching_rules_list: - matched_compiler_component, match_err_msg = rule.match_rule(error_message) - if matched_compiler_component is not None: - updated_compiler_component_status = True - self.failure_reason = match_err_msg - # Set all the compiler components less than matched compiler component to True. - for compiler_component in CompilerComponent: - if compiler_component < matched_compiler_component: - self.components[str(compiler_component.name)] = True - break - # If no match is found, mark the UNKNOWN compiler component alone to True. - if not updated_compiler_component_status: - self.components[str(CompilerComponent.UNKNOWN.name)] = True - else: - # If no error message is provided, mark all compiler components (except UNKNOWN) to True. - for compiler_component in CompilerComponent: - if compiler_component != CompilerComponent.UNKNOWN: - self.components[str(compiler_component.name)] = True - - def __str__(self): - return f"UniqueOpTestInfo(op={self.Op}, Operands={self.Operands}, Args={self.Args}, components={self.components}, self.failure_reason={self.failure_reason})" - - -@dataclass -class ModelVariantInfo: - """ - Stores information about a model, variant, framework of the model, including its support rates for different compiler components. - - Attributes: - model_name (str): The name of the model. - variant_name (str): The name of the model variant. - framework (str): The framework used for the model. - unique_ops (List[UniqueOpTestInfo]): List of unique op configuration test info - forge_support_rate (float): The support rate for the Forge compiler component. Defaults to 0.0. - mlir_support_rate (float): The support rate for the MLIR compiler component. Defaults to 0.0. 
- ttmetal_support_rate (float): The support rate for the TT_METAL compiler component. Defaults to 0.0. - unknown_rate (float): The support rate for an unknown compiler component. Defaults to 0.0. - """ - - model_name: str - variant_name: str - framework: str - unique_ops: List[UniqueOpTestInfo] - forge_support_rate: float = 0.0 - mlir_support_rate: float = 0.0 - ttmetal_support_rate: float = 0.0 - unknown_rate: float = 0.0 - last_update_datetime: str = "" - - def get_support_rate(self, compiler_component: CompilerComponent): - # Check and return the appropriate support rate based on the compiler component. - if compiler_component == CompilerComponent.FORGE: - return self.forge_support_rate - elif compiler_component == CompilerComponent.MLIR: - return self.mlir_support_rate - elif compiler_component == CompilerComponent.TT_METAL: - return self.ttmetal_support_rate - elif compiler_component == CompilerComponent.UNKNOWN: - return self.unknown_rate - else: - logger.error(f"There is no compilercomponent {compiler_component.name}") - - def update_support_rate(self, compiler_component: CompilerComponent, support_rate: float): - # Update the appropriate support rate based on the compiler component. - if compiler_component == CompilerComponent.FORGE: - self.forge_support_rate = support_rate - elif compiler_component == CompilerComponent.MLIR: - self.mlir_support_rate = support_rate - elif compiler_component == CompilerComponent.TT_METAL: - self.ttmetal_support_rate = support_rate - elif compiler_component == CompilerComponent.UNKNOWN: - self.unknown_rate = support_rate - else: - logger.error(f"There is no compilercomponent {compiler_component.name}") - - def __str__(self): - model_variant_info = "" - model_variant_info += f"\t\tModel : {model_name}\n" - model_variant_info += f"\t\tVariant : {variant_name}\n" - model_variant_info += f"\t\tframework : {framework}\n" - model_variant_info += f"\t\tforge_support_rate : {forge_support_rate}\n" - model_variant_info += f"\t\tmlir_support_rate : {mlir_support_rate}\n" - model_variant_info += f"\t\tttmetal_support_rate : {ttmetal_support_rate}\n" - model_variant_info += f"\t\tunknown_rate : {unknown_rate}\n" - model_variant_info += f"\t\tlast_update_datetime : {last_update_datetime}\n" - for idx, unique_op in enumerate(unique_ops): - model_variant_info += f"\t\t\t\t{idx}){str(unique_op)}\n" - - -class HtmlSymbol(Enum): - PASS = "✅" # Checkmark - FAIL = "❌" # Crossmark - UNKNOWN = "�" # Question mark - - -class MarkDownWriter: - """ - A utility class for writing Markdown files, including headings, tables, and links. - - Attributes: - markdown_file_name (str): The name of the Markdown file (without extension). - markdown_file_dir_path (str): The directory path where the Markdown file is created. 
- """ - - def __init__(self, markdown_file_name: str, markdown_file_dir_path: str = None, open_file: bool = True): - self.markdown_file_name = markdown_file_name - self.markdown_file = self.markdown_file_name + ".md" - if markdown_file_dir_path is not None: - self.markdown_file_dir_path = markdown_file_dir_path - else: - self.markdown_file_dir_path = os.getcwd() - os.makedirs(self.markdown_file_dir_path, exist_ok=True) - if open_file: - self.file = open(os.path.join(self.markdown_file_dir_path, self.markdown_file), "w") - - def write(self, data: str): - self.file.write(data) - - def write_line(self, data: str): - self.write(data + "\n") - - def write_table_heading(self, table_heading: str, heading_rank: int = 1): - table_heading = str("#" * heading_rank) + " " + table_heading - self.write_line(table_heading) - - def write_table(self, headers, rows): - # Create a Markdown table using the tabulate library with GitHub-flavored table formatting. - markdown_table = tabulate(rows, headers, tablefmt="github", colalign=("center",) * len(headers)) - self.write_line(markdown_table) - - @classmethod - def get_component_names_for_header(cls, compiler_component: CompilerComponent): - if compiler_component == CompilerComponent.FORGE: - return "Forge-Fe" - elif compiler_component == CompilerComponent.MLIR: - return "MLIR" - elif compiler_component == CompilerComponent.TT_METAL: - return "Metalium" - elif compiler_component == CompilerComponent.UNKNOWN: - return "N/A" - else: - logger.error(f"There is no compilercomponent {compiler_component.name}") - - def write_html_table_heading(self, table_heading: str, heading_rank: int = 1): - table_heading = f"{table_heading}" - self.write_line(table_heading) - - def create_html_table_and_write(self, headers: Dict[str, List[str]], rows: List[List[str]]): - sub_headers = [] - for headers_list in headers.values(): - sub_headers.extend(headers_list) - - sub_header_row_data_length_match = all([True if len(row) == len(sub_headers) else False for row in rows]) - - assert sub_header_row_data_length_match, "Sub headers and table row length is not matched" - - table_df = pd.DataFrame(data=rows, columns=sub_headers) - - top_headers = [ - (main_header, sub_header) for main_header, sub_headers in headers.items() for sub_header in sub_headers - ] - table_df.columns = pd.MultiIndex.from_tuples(top_headers) - - html_table = table_df.to_html(index=False, na_rep=" ", justify="center", escape=False) - - self.write_line(html_table) - - @classmethod - def create_md_link(cls, link_text: str, url_or_path: str): - return f"[{link_text}]({url_or_path})" - - @classmethod - def create_html_link(cls, link_text: str, url_or_path: str): - return f'{link_text}' - - def close_file(self): - self.file.close() - - -def check_path(directory_or_file_path: str): - - # Check if a file or directory exists, return True otherwise return False - if os.path.exists(directory_or_file_path): - logger.info(f"{directory_or_file_path} exists!") - return True - - logger.info(f"{directory_or_file_path} does not exist.") - return False - - -def dump_logs(log_files: Union[str, List[str]], content: str): - if isinstance(log_files, str): - log_files = [log_files] - for log_file in log_files: - log_file_dir_path = "/".join(log_file.split("/")[:-1]) - os.makedirs(log_file_dir_path, exist_ok=True) - with open(log_file, "w") as f: - f.write(content) - logger.info(f"Dumped test logs in {log_file}") - - -def collect_all_model_analysis_test(directory_or_file_path: str, output_directory_path: str): - """ - Collect all the 
tests marked with the `model_analysis` marker in a specified directory or file. - """ - - # Ensure the directory or file path exists - assert check_path( - directory_or_file_path - ), f"The directory path for collecting test {directory_or_file_path} doesn't exists" - - logger.info(f"Collecting all the test that has model_analysis marker in {directory_or_file_path}") - - collected_test_outputs = "" - try: - # Run pytest to collect tests with the `model_analysis` marker - result = subprocess.run( - ["pytest", directory_or_file_path, "-m", "model_analysis", "--collect-only"], - capture_output=True, - text=True, - check=True, - ) - - # Append stdout and stderr to the collected outputs - collected_test_outputs += "STDOUT:\n" + result.stdout - collected_test_outputs += "STDERR:\n" + result.stderr - - except subprocess.CalledProcessError as e: - collected_test_outputs += e.output - - # Save the collected test outputs to a file - collected_test_file_path = os.path.join(output_directory_path, "collected_tests.txt") - dump_logs(collected_test_file_path, collected_test_outputs) - - # Extract tests from the collected test outputs - test_list = [] - with open(collected_test_file_path, "r") as collected_test_file: - lines = collected_test_file.readlines() - test_lines = False - for line in lines: - if "Automatic Model Analysis Collected tests:" in line: - test_lines = True - elif "Automatic Model Analysis Collected test count:" in line: - test_lines = False - break - elif test_lines: - test_list.append(str(line).replace("\n", "")) - - return test_list - - -def generate_and_export_unique_ops_tests(test_directory_or_file_path: str, unique_ops_output_directory_path: str): - """ - Collect the test with model_analysis marker in the test_directory_or_file_path specified by the user - and then generate unique op test for all the collected test and return the list of directory path - containing exported models unique op configuration as xlsx file - """ - - # Collect all the pytest inside the test_directory_or_file_path specified by the user with model_analysis marker - test_list = collect_all_model_analysis_test(test_directory_or_file_path, unique_ops_output_directory_path) - - assert ( - test_list != [] - ), f"No tests found in the {test_directory_or_file_path} path with model_analysis pytest marker" - - # Create a dictonary contains model_name as key and model tests(i.e include variant, task) as values - model_name_to_tests = {} - for test in test_list: - model_name = test.split("::")[0].split("/")[-1].replace(".py", "").replace("test_", "") - if model_name not in model_name_to_tests.keys(): - model_name_to_tests[model_name] = [test] - else: - model_name_to_tests[model_name].append(test) - - # Generate unique op test for the all collected test and save the models unique ops test information in the unique_ops_output_directory_path - model_output_dir_paths = [] - for model_name, tests in model_name_to_tests.items(): - model_output_dir_path = os.path.join(unique_ops_output_directory_path, model_name) - os.makedirs(model_output_dir_path, exist_ok=True) - model_output_dir_paths.append(model_output_dir_path) - for test in tests: - logger.info(f"Running the tests : {test}") - try: - result = subprocess.run( - ["pytest", test, "-vss", "--generate-unique-op-tests"], - capture_output=True, - text=True, - check=True, - env=dict( - os.environ, - FORGE_DISABLE_REPORTIFY_DUMP="1", - FORGE_EXPORT_TVM_GENERATED_UNIQUE_OP_TESTS_DETAILS="1", - FORGE_EXPORT_TVM_GENERATED_UNIQUE_OP_TESTS_DETAILS_DIR_PATH=model_output_dir_path, - 
), - ) - if result.returncode != 0: - logger.error(f"Error while running the pytest:\n stdout: {result.stdout}\n stderr: {result.stderr}") - else: - logger.info(f"Successfully generated and exported unique ops test") - - except subprocess.CalledProcessError as e: - logger.error(f"Error while running the pytest:\n {e.output}") - - return model_output_dir_paths - - -def extract_unique_op_tests_from_models(model_output_dir_paths: List[str], unique_ops_output_directory_path: str): - """ - Extract unique op configuration across all the models which will avoid running the redudant - op configuration again by using the exported unique op configuration test details and models metadata - """ - - # Dictionary to store all the operations found in model variants - models_operations = {} - unique_op_count = 0 - - # Dictionary to store constants (name and tensor) used in the model variants - models_contants = {} - - # Iterate through all provided model directories - for model_output_dir_path in model_output_dir_paths: - - # Extract the model name from the directory path - model_name = model_output_dir_path.split("/")[-1] - - # List all model variants in the directory - model_variants = os.listdir(model_output_dir_path) - - # Process each model variant - for model_variant in model_variants: - - model_variant_dir_path = os.path.join(model_output_dir_path, model_variant) - - # Look for `.xlsx` and `.json` file containing unique operation details and metadata - model_variant_tvm_generated_unique_op_xslx_file_path = None - model_variant_tvm_generated_unique_op_metadata_file_path = None - for f in os.listdir(model_variant_dir_path): - if f.endswith(".xlsx"): - model_variant_tvm_generated_unique_op_xslx_file_path = os.path.join(model_variant_dir_path, f) - elif f.endswith(".json"): - model_variant_tvm_generated_unique_op_metadata_file_path = os.path.join(model_variant_dir_path, f) - - # Skip if either `.xlsx` or `.json` file is missing - if ( - model_variant_tvm_generated_unique_op_xslx_file_path is None - or model_variant_tvm_generated_unique_op_metadata_file_path is None - ): - continue - - # Read the `.xlsx` file contains model variant unique op configuration details - model_variant_df = pd.read_excel( - model_variant_tvm_generated_unique_op_xslx_file_path, - header=0, - usecols=[ - "Op", - "Operand_Names", - "Operand_Shapes", - "Operand_Types", - "Operand_Dtypes", - "Args", - "Testfile", - ], - ) - - # Read the `.json` file contains model variant metadata information - with open(model_variant_tvm_generated_unique_op_metadata_file_path, "r") as json_file: - model_variant_metadata = json.load(json_file) - - # Load model variants parameters and buffers as tensors from specified files - named_parameters = torch.load(model_variant_metadata["named_params_file_name"]) - if model_variant_metadata["param_file_name"] is not None: - serialized_params = torch.load(model_variant_metadata["param_file_name"]) - named_parameters.update(serialized_params) - named_buffers = torch.load(model_variant_metadata["named_buffers_file_name"]) - named_parameters.update(named_buffers) - - # Process each row in the `.xlsx` file to extract operation configurations - for index, row in model_variant_df.iterrows(): - row = row.to_dict() - unique_op_count += 1 - - operand_names = ast.literal_eval(row["Operand_Names"]) - operand_types = ast.literal_eval(row["Operand_Types"]) - operand_types = [NodeType.from_json(operand_type) for operand_type in operand_types] - - # Prepare metadata associated with the operation - metadata = {} - 
metadata["model_variant_info"] = {} - metadata["model_variant_info"]["model_name"] = model_name - metadata["model_variant_info"]["variant_name"] = model_variant_metadata["module_name"] - metadata["model_variant_info"]["framework"] = model_variant_metadata["framework"] - metadata["model_variant_info"]["Testfile"] = row["Testfile"] - - # Create an Operation object with op name, shape, nodetype, dtype, arguments and operation metadata - models_operations[unique_op_count] = Operation( - function_name=row["Op"], - input_names=operand_names, - args=ast.literal_eval(row["Args"]), - input_shapes=ast.literal_eval(row["Operand_Shapes"]), - input_dtypes=ast.literal_eval(row["Operand_Dtypes"]), - input_node_types=operand_types, - metadata=metadata, - ) - - # Store tensor which has constant nodetype as operands - for operand_type, operand_name in zip(operand_types, operand_names): - if operand_type == NodeType.Constant: - models_contants[operand_name] = named_parameters[operand_name] - - # Extract unique operation configuration configuration across all the model variants - unique_operations = UniqueOperations.create_unique_operations(models_operations, models_contants) - - # Dump the extracted unique operation configurations across all the model variants to a log file - models_unique_op_config_file_path = os.path.join( - unique_ops_output_directory_path, "extracted_unique_configuration_across_models.log" - ) - dump_logs(models_unique_op_config_file_path, str(unique_operations)) - - return unique_operations - - -def run_models_unique_op_tests(unique_operations, unique_ops_output_directory_path, dump_failure_logs): - """ - Run unique op configuration test that has been collected across all the models and populate the test result in the model variants - """ - - models_details = {} - - # Iterate over each unique operation - for forge_op_function_name in sorted(unique_operations): - - # Extract operation name from forge op function name - op_name = forge_op_function_name.split(".")[-1] - - # Get the unique operands and operation arguments assiocated the operand metadata - unique_operands_and_opargs_opmetadata = unique_operations[ - forge_op_function_name - ].get_unique_operands_and_opargs_opmetadata() - - for operands, opargs_opmetadata in unique_operands_and_opargs_opmetadata: - - for args, operation_metadata in opargs_opmetadata.get_op_args_and_metadata(): - - # Extract operands details such as names types, shapes, and data types - operand_types = [NodeType.to_json(operand_type) for operand_type in operands.get_operand_types()] - operand_shapes = operands.get_operand_shapes() - operand_dtypes = operands.get_operand_dtypes() - - # Extract model varaiant info such as model, variant and framework name - model_variant_info_list = operation_metadata["model_variant_info"] - framework = model_variant_info_list[0]["framework"] - operand_names = operation_metadata["operand_names"][0] - - # Create a UniqueOpTestInfo object to store details about the operation (name, operands, args) - unique_op_test_info = UniqueOpTestInfo.create( - op_name=op_name, - operand_names=operand_names, - operand_types=operand_types, - operand_shapes=operand_shapes, - operand_dtypes=operand_dtypes, - args=args, - ) - - # Extract the test file path - test = model_variant_info_list[0]["Testfile"] - logger.info(f"Running the test: {test}") - - # If dump_failure_logs is set to True, prepare the log file paths for storing logs - if dump_failure_logs: - log_files = [] - for model_variant_info in model_variant_info_list: - log_file_dir_path = 
os.path.join( - unique_ops_output_directory_path, - model_variant_info["model_name"], - model_variant_info["variant_name"], - op_name, - ) - test_name = model_variant_info["Testfile"].split("::")[ - -1 - ] # Extract the test name from the test path - log_file_name = str(test_name) + "_log.txt" - log_file = os.path.join(log_file_dir_path, log_file_name) - log_files.append(log_file) - - # Start the timer to measure test execution time - start_time = time.time() - - try: - # Run the unique op test by using subprocess libary run method. - result = subprocess.run( - ["pytest", test, "-vss"], - check=True, - capture_output=True, - text=True, - timeout=180, - env=dict( - os.environ, - FORGE_DISABLE_REPORTIFY_DUMP="1", - ), - ) - - # Calculate elapsed time after test execution - elapsed_time = time.time() - start_time - - if result.returncode != 0: - logger.info(f"\tFailed ({elapsed_time:.2f} seconds)") - - # If the result.returncode is not equal to zero, collect the test stdout and stderr - error_message = "" - if result.stderr: - error_message += "STDERR: \n\n" - error_message += result.stderr - if result.stdout: - error_message += "STDOUT: \n\n" - error_message += result.stdout - - # Update the instance of the UniqueOpTestInfo components datamember status - # for each compiler component and error message in failure_reason datamember - unique_op_test_info.update_compiler_components(error_message) - - # Save failure logs if dump_failure_logs is set to True - if dump_failure_logs: - dump_logs(log_files, error_message) - - else: - # If the test passed (return code is 0), update the UniqueOpTestInfo instance - # components datamember for all compiler component to True expect COMPILERCOMPONENT.UNKNOWN - logger.info(f"\tPassed ({elapsed_time:.2f} seconds)") - unique_op_test_info.update_compiler_components() - - # Handle timeout exceptions if the test exceeds the allowed 60-second time limit - except subprocess.TimeoutExpired as e: - elapsed_time = time.time() - start_time - - error_message = "Test timed out after 180 seconds" - unique_op_test_info.update_compiler_components(error_message) - - logger.info(f"\tFailed ({elapsed_time:.2f} seconds) due to {error_message}") - - if dump_failure_logs: - dump_logs(log_files, error_message) - - # Do WH warm reset (potentially hang occurred) - logger.info("\tWarm reset...") - os.system("/home/software/syseng/wh/tt-smi -lr all") - - # Handle other exceptions during unique op test execution - except subprocess.CalledProcessError as e: - elapsed_time = time.time() - start_time - logger.info(f"\tFailed ({elapsed_time:.2f} seconds)") - - error_message = "" - if e.stderr: - error_message += "STDERR: \n\n" - error_message += e.stderr - if e.stdout: - error_message += "STDOUT: \n\n" - error_message += e.stdout - - # Update the UniqueOpTestInfo instance components datamember status - # for each compiler component and error message in failure_reason datamember - unique_op_test_info.update_compiler_components(error_message) - - if dump_failure_logs: - dump_logs(log_files, error_message) - - # Handle unexpected exceptions - except Exception as ex: - elapsed_time = time.time() - start_time - error_message = ( - f"An unexpected error occurred while running {test}: {ex} ({elapsed_time:.2f} seconds)" - ) - unique_op_test_info.update_compiler_components(error_message) - logger.info(error_message) - - if dump_failure_logs: - dump_logs(log_files, error_message) - - # Update model details dictionary with variant name as key and ModelVariantInfo as values - for model_variant_info in 
model_variant_info_list: - if model_variant_info["variant_name"] in models_details.keys(): - models_details[model_variant_info["variant_name"]].unique_ops.append(unique_op_test_info) - else: - models_details[model_variant_info["variant_name"]] = ModelVariantInfo( - model_name=model_variant_info["model_name"], - variant_name=model_variant_info["variant_name"], - framework=model_variant_info["framework"], - unique_ops=[unique_op_test_info], - ) - - # Calculate and update the compiler support rates for each component for all the model variants - for variant_name, model_variant_info in models_details.items(): - for compiler_component in CompilerComponent: - compiler_component_passed_test_count = sum( - [ - int(unique_op_test_info.components[str(compiler_component.name)]) - for unique_op_test_info in model_variant_info.unique_ops - ] - ) - total_num_of_test = len(model_variant_info.unique_ops) - compiler_component_pass_percentage = ( - str(math.ceil((compiler_component_passed_test_count / total_num_of_test) * 100.0)) + " %" - ) - models_details[variant_name].update_support_rate(compiler_component, compiler_component_pass_percentage) - - models_details[variant_name].last_update_datetime = time.strftime("%A, %d %b %Y %I:%M:%S %p", time.gmtime()) - - return models_details - - -def generate_markdown( - markdown_file_name: str, - markdown_file_dir_path: str, - table_heading: str, - table_headers: Dict[str, List[str]], - table_rows: List[List[str]], -): - """ - Generates a Markdown file that contains an HTML table with the given headers and rows. - """ - # Create a markdown file for summarizing the results for all models in a single file - markdown_writer = MarkDownWriter(markdown_file_name, markdown_file_dir_path) - - # Write a heading for the HTML table - markdown_writer.write_html_table_heading(table_heading) - - # Generate and write the HTML table to the Markdown file - markdown_writer.create_html_table_and_write(headers=table_headers, rows=table_rows) - - # Close the Markdown file after writing the table - markdown_writer.close_file() - - -def create_root_and_sub_markdown_file(models_details, markdown_directory_path): - """ - Creates root and sub Markdown files summarizing the models and their unique operation test results. - - The root Markdown file contains an overview of all models and their compiler support rates. - The sub Markdown files contain detailed results for each model variant's unique operation tests. 
- - """ - # Root markdown file name and directory path for saving the md file - root_markdown_file_name = "ModelsInfo" - root_markdown_directory_path = markdown_directory_path - - # HTML table heading for the root markdown and sub markdown files - root_markdown_table_heading = "List of models and current compiler support rates" - sub_markdown_table_heading = "Unique ops configuration and compiler support info" - - # List of compiler component names for table headers - compiler_component_names = [ - MarkDownWriter.get_component_names_for_header(compiler_component) for compiler_component in CompilerComponent - ] - - # HTML table header for the root markdown and sub markdown files - root_markdown_table_headers = { - "Model Details": ["Name", "Variant", "Framework"], - "Passing rate of unique ops for each component": compiler_component_names, - "Last update(in GMT)": ["Date & time"], - } - - sub_markdown_table_headers = { - "Operation Details": ["Name", "Operands", "Arguments"], - "Component Passing Check": compiler_component_names, - "Issues": ["Failure Reason"], - } - - root_markdown_table_rows = [] - - # Iterate over model variants to generate sub markdown files and populate root markdown rows - for model_variant_info in models_details.values(): - - # Prepare the path for the sub markdown file to store test results for this model variant - sub_markdown_file_name = model_variant_info.variant_name - sub_markdown_directory_path = os.path.join(markdown_directory_path, "Models", model_variant_info.model_name) - - # List to store table rows for the sub markdown file - sub_markdown_table_rows = [] - - # Iterate over the unique operation test information to populate table rows for sub markdown - for unique_op_test_info in model_variant_info.unique_ops: - - table_data = [unique_op_test_info.Op] - table_data.append("
<br> X <br>".join(unique_op_test_info.Operands)) -            table_data.append("<br>
".join(unique_op_test_info.Args)) - - # If unknown compiler component is set to True in unique_op_test_info, use the unknown symbol for indicating unknown compiler component status and for other compiler components set empty string - # else for unknown compiler component set empty string for indicating status and for other compiler component set pass or fail symbol - if unique_op_test_info.components[str(CompilerComponent.UNKNOWN.name)]: - for component_name, test_status in unique_op_test_info.components.items(): - test_status = ( - HtmlSymbol.UNKNOWN.value if component_name == str(CompilerComponent.UNKNOWN.name) else " " - ) - table_data.append(test_status) - else: - for component_name, test_status in unique_op_test_info.components.items(): - if component_name == str(CompilerComponent.UNKNOWN.name): - test_status = " " - else: - test_status = HtmlSymbol.PASS.value if test_status else HtmlSymbol.FAIL.value - table_data.append(test_status) - table_data.append(unique_op_test_info.failure_reason) - sub_markdown_table_rows.append(table_data) - - # Generate sub markdown file that contain model variant unique op test info - generate_markdown( - markdown_file_name=sub_markdown_file_name, - markdown_file_dir_path=sub_markdown_directory_path, - table_heading=sub_markdown_table_heading, - table_headers=sub_markdown_table_headers, - table_rows=sub_markdown_table_rows, - ) - - # Prepare a row for the root markdown file with summary details of the model variant - table_data = [model_variant_info.model_name] - - # Create an HTML link for the variant name, linking to its corresponding model variant markdown file - table_data.append( - MarkDownWriter.create_html_link( - model_variant_info.variant_name, - os.path.join("./Models", model_variant_info.model_name, model_variant_info.variant_name + ".md"), - ) - ) - - table_data.append(model_variant_info.framework) - for compiler_component in CompilerComponent: - table_data.append(model_variant_info.get_support_rate(compiler_component)) - table_data.append(model_variant_info.last_update_datetime) - root_markdown_table_rows.append(table_data) - - # Generate root markdown file that contain all the model variants result - generate_markdown( - markdown_file_name=root_markdown_file_name, - markdown_file_dir_path=root_markdown_directory_path, - table_heading=root_markdown_table_heading, - table_headers=root_markdown_table_headers, - table_rows=root_markdown_table_rows, - ) - - -def main(): - parser = argparse.ArgumentParser( - description="""Generate unique ops test for the models present in the test_directory_or_file_path - specified by the user and run the unique ops test and generate markdown files, the root markdown file contains model name, - variant name, framework and compiler components supported rate and sub-markdown file contains model variant unique op tests info""" - ) - - parser.add_argument( - "--test_directory_or_file_path", - type=str, - default=os.path.join(os.getcwd(), "forge/test"), - help="Specify the directory or file path containing models test with model_analysis pytest marker", - ) - parser.add_argument( - "--dump_failure_logs", - action="store_true", - help="Specify the flag to dump the unique ops test failure logs.", - ) - parser.add_argument( - "--markdown_directory_path", - default=os.path.join(os.getcwd(), "models_analysis_docs"), - required=False, - help="Specify the directory path for saving models information as markdowns file", - ) - parser.add_argument( - "--unique_ops_output_directory_path", - default=os.path.join(os.getcwd(), 
"unique_ops"), - required=False, - help="Specify the output directory path for saving models unique op tests outputs(i.e failure logs, xlsx file)", - ) - - args = parser.parse_args() - - model_output_dir_paths = generate_and_export_unique_ops_tests( - test_directory_or_file_path=args.test_directory_or_file_path, - unique_ops_output_directory_path=args.unique_ops_output_directory_path, - ) - - unique_operations = extract_unique_op_tests_from_models( - model_output_dir_paths=model_output_dir_paths, - unique_ops_output_directory_path=args.unique_ops_output_directory_path, - ) - - models_details = run_models_unique_op_tests( - unique_operations=unique_operations, - unique_ops_output_directory_path=args.unique_ops_output_directory_path, - dump_failure_logs=args.dump_failure_logs, - ) - - create_root_and_sub_markdown_file( - models_details=models_details, markdown_directory_path=args.markdown_directory_path - ) - - -if __name__ == "__main__": - main() diff --git a/scripts/model_analysis/__init__.py b/scripts/model_analysis/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/model_analysis/exception_rules.py b/scripts/model_analysis/exception_rules.py new file mode 100644 index 000000000..a558ba82b --- /dev/null +++ b/scripts/model_analysis/exception_rules.py @@ -0,0 +1,286 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +from utils import CompilerComponent, MatchingExceptionRule, MatchingCompilerComponentException + +common_failure_matching_rules_list = [ + MatchingCompilerComponentException( + CompilerComponent.FORGE, + [ + MatchingExceptionRule( + "forge_module evaluation", ["AssertionError", "Setting a tensor value of incorrect shape"] + ), + MatchingExceptionRule( + "embedding indicies tensor", + ["IndexError", "forge/forge/op/eval/forge/embedding.py", "index out of range in self"], + ), + MatchingExceptionRule( + "post_initial_graph_passes", + [ + "RuntimeError", + "has_newstyle_interface(std::get(type), false)", + "decomposing a type with old OpType interface, expects new OpType interface", + ], + ), + MatchingExceptionRule( + "lower_to_mlir", + ["RuntimeError", "Found Unsupported operations while lowering from TTForge to TTIR in forward graph"], + ), + MatchingExceptionRule( + "lower_to_mlir", + ["RuntimeError", "Unsupported data format during lowering from TTForge to TTIR"], + ), + MatchingExceptionRule( + "mlir generation failure", ["RuntimeError", "Generated MLIR module failed verification"] + ), + MatchingExceptionRule( + "Convert tt-forge attribute to an MLIR attribute", ["RuntimeError", "Unhandled attribute type"] + ), + MatchingExceptionRule("Runtime Datatype Unsupported", ["RuntimeError", "Unhandled dtype Bool"]), + # Compiled model Runtime + MatchingExceptionRule( + "Runtime Datatype mismatch", ["RuntimeError", "Tensor", "data type mismatch: expected", "got"] + ), + MatchingExceptionRule( + "Runtime Shape mismatch", ["RuntimeError", "Tensor", "shape mismatch: expected", "got"] + ), + MatchingExceptionRule( + "Runtime stride mismatch", ["RuntimeError", "Tensor", "stride mismatch: expected", "got"] + ), + MatchingExceptionRule( + "Runtime Input count mismatch", ["RuntimeError", "Input count mismatch: expected", "got"] + ), + MatchingExceptionRule( + "post_const_eval_tensors", ["RuntimeError", "unsupported memory format option Contiguous"] + ), + ], + ), + MatchingCompilerComponentException( + CompilerComponent.MLIR, + [ + MatchingExceptionRule( + "TTIR to TTNN Conv2dOpConversionPattern", + [ + 
"tt_forge_signal_handler", + "tt-mlir/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp", + "Conv2dOpConversionPattern::matchAndRewrite(ttir::Conv2dOp, OpAdaptor, ConversionPatternRewriter &)", + "adaptor.getPaddingBottom() == adaptor.getPaddingTop()", + "TTNN only supports padding height/width attributes. Thus, padding_top", + "must equal padding_bottom for the op to execute as expected", + ], + ), + MatchingExceptionRule( + "ttnn.reshape mlir pipeline", + [ + "RuntimeError", + "'ttnn.reshape' op Shape attribute size must match output tensor rank", + "Failed to run MLIR compiler pass pipeline", + ], + ), + MatchingExceptionRule( + "ttnn.maxpool2d mlir pipeline", + [ + "RuntimeError", + "ttnn.max_pool2d currently only supports an input type of bfloat16", + "Failed to run MLIR compiler pass pipeline", + ], + ), + MatchingExceptionRule("mlir pipeline", ["RuntimeError", "Failed to run MLIR compiler pass pipeline"]), + MatchingExceptionRule( + "MLIR runtime ttnn ", ["tt::exception", "tt-mlir/runtime/lib/ttnn/runtime.cpp", "Unsupported data type"] + ), + ], + ), + MatchingCompilerComponentException( + CompilerComponent.TT_METAL, + [ + MatchingExceptionRule( + "TT-Metal vs Forge Output Data mismatch", + [ + "ValueError", + "Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model", + ", compiled_model", + ], + ), + MatchingExceptionRule( + "ttnn.tilize validation", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/data_movement/tilize/device/tilize_op.cpp", + "input_tensor_a.get_dtype() == DataType::BFLOAT16", + ], + ), + MatchingExceptionRule( + "ttnn.tilize_with_val_padding validation", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/data_movement/tilize_with_val_padding/device/tilize_with_val_padding_op.cpp", + "input_tensor_a.get_dtype() == DataType::BFLOAT16 or input_tensor_a.get_dtype() == DataType::UINT32", + "Can only tilize bfloat16 or uint32 tensors", + ], + ), + MatchingExceptionRule( + "ttnn.embedding validation", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp", + "weights.get_dtype() == DataType::BFLOAT16", + ], + ), + MatchingExceptionRule( + "ttnn.embedding validation", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp", + "a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16", + "Input must be UINT32 or BFLOAT16", + ], + ), + MatchingExceptionRule( + "ttnn elementwise binary", ["RuntimeError", "BinaryOpType cannot be mapped to BcastOpMath"] + ), + MatchingExceptionRule( + "ttnn elementwise binary", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_device_operation.cpp", + "ttnn::operations::binary::BinaryDeviceOperation: unsupported broadcast", + ], + ), + MatchingExceptionRule( + "ttnn.concat validation", + ["RuntimeError", "Tile padding along concatenated dim", "not supported for concat yet"], + ), + MatchingExceptionRule( + "ttnn.reshape validation", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/data_movement/reshape_on_device/device/reshape_op.cpp", + "input_tensor_a.get_dtype() == DataType::BFLOAT16", + ], + ), + MatchingExceptionRule( + "ttnn.matmul", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp", + "Mt % per_core_M == 0", + ], + ), + MatchingExceptionRule( + "ttnn.matmul", + [ + "RuntimeError", + 
"tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp", + "Nt % per_core_N == 0", + ], + ), + MatchingExceptionRule( + "ttnn.reshape", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp", + "new_volume == old_volume", + "Invalid arguments to reshape", + ], + ), + MatchingExceptionRule( + "ttnn.reshape", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/data_movement/reshape_view/reshape.cpp", + "tensor_shape.rank() <= 4", + "Only up to 4D tensors", + ], + ), + MatchingExceptionRule( + "ttnn permute", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/data_movement/permute/device/permute_device_operation.cpp", + "attributes.dims.back() == tensor_args.input_tensor.get_logical_shape().rank() - 1", + "Last dimension of permute must be the last dimension of the input tensor as page-breaking is not supported at the moment", + ], + ), + MatchingExceptionRule( + "ttnn.pad", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/data_movement/pad/pad.cpp", + "Tensor rank is not 4", + ], + ), + MatchingExceptionRule( + "TTNN tensor types", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/tensor/types.cpp", + "normalized_index >= 0 and normalized_index < rank", + "Index is out of bounds for the rank", + ], + ), + MatchingExceptionRule( + "TTNN tensor types", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/tensor/types.cpp", + "shape[cur_idx] == 1", + "Can't convert shape rank", + ], + ), + MatchingExceptionRule("ttmetal allocations", ["RuntimeError", "Statically allocated circular buffers"]), + MatchingExceptionRule( + "ttmetal allocations", + [ + "RuntimeError", + "tt-metal/tt_metal/impl/allocator/allocator.cpp", + "Out of Memory: Not enough space to allocate", + ], + ), + MatchingExceptionRule( + "ttnn core", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/core/work_split/work_split_tilize.hpp", + "logical_shape.rank() >= 2 && logical_shape.rank() <= 4", + "Only 2D, 3D, and 4D tensors are supported", + ], + ), + MatchingExceptionRule( + "ttnn softmax", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp", + "input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B", + "Inputs must be of bfloat16 or bfloat8_b type", + ], + ), + MatchingExceptionRule( + "ttnn unsqueeze_to_4D", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/core/core.cpp", + "Tensor rank is greater than 4", + ], + ), + MatchingExceptionRule( + "ttnn matmul", + [ + "RuntimeError", + "tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op.cpp", + "(input_tensor_a.get_legacy_shape()[-1] / in0_tile_shape[1]) % program_config.in0_block_w == 0", + "Kt must be divisible by in0_block_w", + ], + ), + MatchingExceptionRule( + "tt-metal ncrisc build", + [ + "RuntimeError", + "tt-metal/tt_metal/impl/program/program.cpp", + "Failed to generate binaries for reader_conv_activations_padded_with_halo_3x3_weights_v2", + "ncrisc build failed", + ], + ), + ], + ), +] diff --git a/scripts/model_analysis/generate_models_ops_test.py b/scripts/model_analysis/generate_models_ops_test.py new file mode 100644 index 000000000..ecd927585 --- /dev/null +++ b/scripts/model_analysis/generate_models_ops_test.py @@ -0,0 +1,76 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +import os +from loguru import logger +import argparse +from utils import create_python_package, run_precommit, remove_directory +from 
unique_ops_utils import generate_and_export_unique_ops_tests, extract_unique_op_tests_from_models +from forge.tvm_unique_op_generation import generate_models_ops_test + + +def main(): + parser = argparse.ArgumentParser( + description="""Extract the unique ops configuration for the models present in the test_directory_or_file_path + specified by the user and then extract the unique ops configuration across all the models and + generate models ops test inside the models_ops_test_output_directory_path specified by the user""" + ) + + parser.add_argument( + "--test_directory_or_file_path", + type=str, + default=os.path.join(os.getcwd(), "forge/test/models"), + help="Specify the directory or file path containing models test with model_analysis pytest marker", + ) + parser.add_argument( + "--unique_ops_output_directory_path", + default=os.path.join(os.getcwd(), "models_unique_ops_output"), + required=False, + help="Specify the output directory path for saving models unique ops outputs(i.e xlsx and json files)", + ) + parser.add_argument( + "--models_ops_test_output_directory_path", + default=os.path.join(os.getcwd(), "forge/test"), + required=False, + help="Specify the directory path for saving generated models ops test", + ) + parser.add_argument( + "--models_ops_test_package_name", + default="models_ops", + required=False, + help="Specify the python package name for saving generated models ops test", + ) + + args = parser.parse_args() + + model_output_dir_paths = generate_and_export_unique_ops_tests( + test_directory_or_file_path=args.test_directory_or_file_path, + unique_ops_output_directory_path=args.unique_ops_output_directory_path, + extract_tvm_unique_ops_config=True, + ) + + unique_ops_config_across_all_models_ops_test_file_path = os.path.join( + args.unique_ops_output_directory_path, "extracted_unique_ops_config_across_all_models_ops_test.log" + ) + unique_operations_across_all_models_ops_test = extract_unique_op_tests_from_models( + model_output_dir_paths=model_output_dir_paths, + unique_ops_config_file_path=unique_ops_config_across_all_models_ops_test_file_path, + use_constant_value=False, + ) + remove_directory( + directory_path=os.path.join(args.models_ops_test_output_directory_path, args.models_ops_test_package_name) + ) + create_python_package( + package_path=args.models_ops_test_output_directory_path, package_name=args.models_ops_test_package_name + ) + generate_models_ops_test( + unique_operations_across_all_models_ops_test, + os.path.join(args.models_ops_test_output_directory_path, args.models_ops_test_package_name), + ) + run_precommit( + directory_path=os.path.join(args.models_ops_test_output_directory_path, args.models_ops_test_package_name) + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/model_analysis/markdown.py b/scripts/model_analysis/markdown.py new file mode 100644 index 000000000..d00dfcc3c --- /dev/null +++ b/scripts/model_analysis/markdown.py @@ -0,0 +1,100 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +import os +from loguru import logger +import pandas as pd +from tabulate import tabulate +from enum import Enum +from typing import Dict, List +from utils import CompilerComponent + + +class HtmlSymbol(Enum): + PASS = "✅" # Checkmark + FAIL = "❌" # Crossmark + UNKNOWN = "�" # Question mark + + +class MarkDownWriter: + """ + A utility class for writing Markdown files, including headings, tables, and links. 
+ + Attributes: + markdown_file_name (str): The name of the Markdown file (without extension). + markdown_file_dir_path (str): The directory path where the Markdown file is created. + """ + + def __init__(self, markdown_file_name: str, markdown_file_dir_path: str = None, open_file: bool = True): + self.markdown_file_name = markdown_file_name + self.markdown_file = self.markdown_file_name + ".md" + if markdown_file_dir_path is not None: + self.markdown_file_dir_path = markdown_file_dir_path + else: + self.markdown_file_dir_path = os.getcwd() + os.makedirs(self.markdown_file_dir_path, exist_ok=True) + if open_file: + self.file = open(os.path.join(self.markdown_file_dir_path, self.markdown_file), "w") + + def write(self, data: str): + self.file.write(data) + + def write_line(self, data: str): + self.write(data + "\n") + + def write_table_heading(self, table_heading: str, heading_rank: int = 1): + table_heading = str("#" * heading_rank) + " " + table_heading + self.write_line(table_heading) + + def write_table(self, headers, rows): + # Create a Markdown table using the tabulate library with GitHub-flavored table formatting. + markdown_table = tabulate(rows, headers, tablefmt="github", colalign=("center",) * len(headers)) + self.write_line(markdown_table) + + @classmethod + def get_component_names_for_header(cls, compiler_component: CompilerComponent): + if compiler_component == CompilerComponent.FORGE: + return "Forge-Fe" + elif compiler_component == CompilerComponent.MLIR: + return "MLIR" + elif compiler_component == CompilerComponent.TT_METAL: + return "Metalium" + elif compiler_component == CompilerComponent.UNKNOWN: + return "N/A" + else: + logger.error(f"There is no compilercomponent {compiler_component.name}") + + def write_html_table_heading(self, table_heading: str, heading_rank: int = 1): + table_heading = f"{table_heading}" + self.write_line(table_heading) + + def create_html_table_and_write(self, headers: Dict[str, List[str]], rows: List[List[str]]): + sub_headers = [] + for headers_list in headers.values(): + sub_headers.extend(headers_list) + + sub_header_row_data_length_match = all([True if len(row) == len(sub_headers) else False for row in rows]) + + assert sub_header_row_data_length_match, "Sub headers and table row length is not matched" + + table_df = pd.DataFrame(data=rows, columns=sub_headers) + + top_headers = [ + (main_header, sub_header) for main_header, sub_headers in headers.items() for sub_header in sub_headers + ] + table_df.columns = pd.MultiIndex.from_tuples(top_headers) + + html_table = table_df.to_html(index=False, na_rep=" ", justify="center", escape=False) + + self.write_line(html_table) + + @classmethod + def create_md_link(cls, link_text: str, url_or_path: str): + return f"[{link_text}]({url_or_path})" + + @classmethod + def create_html_link(cls, link_text: str, url_or_path: str): + return f'{link_text}' + + def close_file(self): + self.file.close() diff --git a/scripts/model_analysis/run_analysis_and_generate_md_files.py b/scripts/model_analysis/run_analysis_and_generate_md_files.py new file mode 100644 index 000000000..9924db695 --- /dev/null +++ b/scripts/model_analysis/run_analysis_and_generate_md_files.py @@ -0,0 +1,554 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +import subprocess +import os +import time +import json +from loguru import logger +import math +import argparse +import pandas as pd +from typing import Dict, List +from dataclasses import dataclass +import ast + +import torch + +from 
forge.tvm_unique_op_generation import Operation, NodeType, UniqueOperations + +from exception_rules import common_failure_matching_rules_list +from markdown import HtmlSymbol, MarkDownWriter +from unique_ops_utils import generate_and_export_unique_ops_tests, extract_unique_op_tests_from_models +from utils import CompilerComponent, check_path, dump_logs, remove_directory + + +class UniqueOpTestInfo: + """ + Represents information about a unique operation test, that includes op name, operands + arguments, and the status of various compiler components. + + Attributes: + op (str): The name of the operation. + operands (List[str]): List of operands associated with the operation. + args (List[str]): List of Operation Arguments if any + components (dict): A dictionary indicating the support status for each compiler component. + failure_reason (str): The reason for failure, if any, during testing. + """ + + def __init__( + self, + op: str, + operands: List[str], + args: List[str], + ): + self.op = str(op) + self.operands = operands + self.args = args + self.components = {} + for compiler_component in CompilerComponent: + self.components[str(compiler_component.name)] = False + self.failure_reason = "" + + @classmethod + def create(cls, op_name, operand_names, operand_types, operand_shapes, operand_dtypes, args): + + operands = cls.create_operands(operand_names, operand_types, operand_shapes, operand_dtypes) + + args = cls.create_args(args) + + return cls(op=op_name, operands=operands, args=args) + + @classmethod + def create_operands(cls, operand_names, operand_types, operand_shapes, operand_dtypes): + operands = [] + for operand_name, operand_type, operand_shape, operand_dtype in zip( + operand_names, operand_types, operand_shapes, operand_dtypes + ): + if isinstance(operand_shape, torch.Tensor): + operands.append(f"Operand(type={operand_type}, name={operand_name}, dtype={operand_dtype})") + else: + operands.append(f"Operand(type={operand_type}, shape={operand_shape}, dtype={operand_dtype})") + return operands + + @classmethod + def create_args(cls, args): + arg_info = [] + if not args.is_empty(): + for arg_name, arg_value in args.items(): + arg_info.append(f"{arg_name} : {arg_value}") + return arg_info + + def update_compiler_components(self, error_message: str = ""): + if error_message: + updated_compiler_component_status = False + # Iterate over all failure matching rules to find a match. + for rule in common_failure_matching_rules_list: + matched_compiler_component, match_err_msg = rule.match_rule(error_message) + if matched_compiler_component is not None: + updated_compiler_component_status = True + self.failure_reason = match_err_msg + # Set all the compiler components less than matched compiler component to True. + for compiler_component in CompilerComponent: + if compiler_component < matched_compiler_component: + self.components[str(compiler_component.name)] = True + break + # If no match is found, mark the UNKNOWN compiler component alone to True. + if not updated_compiler_component_status: + self.components[str(CompilerComponent.UNKNOWN.name)] = True + else: + # If no error message is provided, mark all compiler components (except UNKNOWN) to True. 
+ for compiler_component in CompilerComponent: + if compiler_component != CompilerComponent.UNKNOWN: + self.components[str(compiler_component.name)] = True + + def __str__(self): + return f"UniqueOpTestInfo(op={self.op}, operands={self.operands}, args={self.args}, components={self.components}, self.failure_reason={self.failure_reason})" + + +@dataclass +class ModelVariantInfo: + """ + Stores information about a model, variant, framework of the model, including its support rates for different compiler components. + + Attributes: + model_name (str): The name of the model. + variant_name (str): The name of the model variant. + framework (str): The framework used for the model. + unique_ops (List[UniqueOpTestInfo]): List of unique op configuration test info + forge_support_rate (float): The support rate for the Forge compiler component. Defaults to 0.0. + mlir_support_rate (float): The support rate for the MLIR compiler component. Defaults to 0.0. + ttmetal_support_rate (float): The support rate for the TT_METAL compiler component. Defaults to 0.0. + unknown_rate (float): The support rate for an unknown compiler component. Defaults to 0.0. + """ + + model_name: str + variant_name: str + framework: str + unique_ops: List[UniqueOpTestInfo] + forge_support_rate: float = 0.0 + mlir_support_rate: float = 0.0 + ttmetal_support_rate: float = 0.0 + unknown_rate: float = 0.0 + last_update_datetime: str = "" + + def get_support_rate(self, compiler_component: CompilerComponent): + # Check and return the appropriate support rate based on the compiler component. + if compiler_component == CompilerComponent.FORGE: + return self.forge_support_rate + elif compiler_component == CompilerComponent.MLIR: + return self.mlir_support_rate + elif compiler_component == CompilerComponent.TT_METAL: + return self.ttmetal_support_rate + elif compiler_component == CompilerComponent.UNKNOWN: + return self.unknown_rate + else: + logger.error(f"There is no compilercomponent {compiler_component.name}") + + def update_support_rate(self, compiler_component: CompilerComponent, support_rate: float): + # Update the appropriate support rate based on the compiler component. 
+ if compiler_component == CompilerComponent.FORGE: + self.forge_support_rate = support_rate + elif compiler_component == CompilerComponent.MLIR: + self.mlir_support_rate = support_rate + elif compiler_component == CompilerComponent.TT_METAL: + self.ttmetal_support_rate = support_rate + elif compiler_component == CompilerComponent.UNKNOWN: + self.unknown_rate = support_rate + else: + logger.error(f"There is no compilercomponent {compiler_component.name}") + + def __str__(self): + model_variant_info = "" + model_variant_info += f"\t\tModel : {model_name}\n" + model_variant_info += f"\t\tVariant : {variant_name}\n" + model_variant_info += f"\t\tframework : {framework}\n" + model_variant_info += f"\t\tforge_support_rate : {forge_support_rate}\n" + model_variant_info += f"\t\tmlir_support_rate : {mlir_support_rate}\n" + model_variant_info += f"\t\tttmetal_support_rate : {ttmetal_support_rate}\n" + model_variant_info += f"\t\tunknown_rate : {unknown_rate}\n" + model_variant_info += f"\t\tlast_update_datetime : {last_update_datetime}\n" + for idx, unique_op in enumerate(unique_ops): + model_variant_info += f"\t\t\t\t{idx}){str(unique_op)}\n" + + +def run_models_unique_op_tests(unique_operations, unique_ops_output_directory_path, dump_failure_logs): + """ + Run unique op configuration test that has been collected across all the models and populate the test result in the model variants + """ + + models_details = {} + + # Iterate over each unique operation + for forge_op_function_name in sorted(unique_operations): + + # Extract operation name from forge op function name + op_name = forge_op_function_name.split(".")[-1] + + # Get the unique operands and operation arguments assiocated the operand metadata + unique_operands_and_opargs_opmetadata = unique_operations[ + forge_op_function_name + ].get_unique_operands_and_opargs_opmetadata() + + for operands, opargs_opmetadata in unique_operands_and_opargs_opmetadata: + + for args, operation_metadata in opargs_opmetadata.get_op_args_and_metadata(): + + # Extract operands details such as names types, shapes, and data types + operand_types = [NodeType.to_json(operand_type) for operand_type in operands.get_operand_types()] + operand_shapes = operands.get_operand_shapes() + operand_dtypes = operands.get_operand_dtypes() + + # Extract model varaiant info such as model, variant and framework name + model_variant_info_list = operation_metadata["model_variant_info"] + framework = model_variant_info_list[0]["framework"] + operand_names = operation_metadata["operand_names"][0] + + # Create a UniqueOpTestInfo object to store details about the operation (name, operands, args) + unique_op_test_info = UniqueOpTestInfo.create( + op_name=op_name, + operand_names=operand_names, + operand_types=operand_types, + operand_shapes=operand_shapes, + operand_dtypes=operand_dtypes, + args=args, + ) + + # Extract the test file path + test = model_variant_info_list[0]["Testfile"] + logger.info(f"Running the test: {test}") + + # If dump_failure_logs is set to True, prepare the log file paths for storing logs + if dump_failure_logs: + log_files = [] + for model_variant_info in model_variant_info_list: + log_file_dir_path = os.path.join( + unique_ops_output_directory_path, + model_variant_info["model_name"], + model_variant_info["variant_name"], + op_name, + ) + test_name = model_variant_info["Testfile"].split("::")[ + -1 + ] # Extract the test name from the test path + log_file_name = str(test_name) + "_log.txt" + log_file = os.path.join(log_file_dir_path, log_file_name) + 
log_files.append(log_file) + + # Start the timer to measure test execution time + start_time = time.time() + + try: + # Run the unique op test by using subprocess libary run method. + result = subprocess.run( + ["pytest", test, "-vss"], + check=True, + capture_output=True, + text=True, + timeout=180, + env=dict( + os.environ, + FORGE_DISABLE_REPORTIFY_DUMP="1", + ), + ) + + # Calculate elapsed time after test execution + elapsed_time = time.time() - start_time + + if result.returncode != 0: + logger.info(f"\tFailed ({elapsed_time:.2f} seconds)") + + # If the result.returncode is not equal to zero, collect the test stdout and stderr + error_message = "" + if result.stderr: + error_message += "STDERR: \n\n" + error_message += result.stderr + if result.stdout: + error_message += "STDOUT: \n\n" + error_message += result.stdout + + # Update the instance of the UniqueOpTestInfo components datamember status + # for each compiler component and error message in failure_reason datamember + unique_op_test_info.update_compiler_components(error_message) + + # Save failure logs if dump_failure_logs is set to True + if dump_failure_logs: + dump_logs(log_files, error_message) + + else: + # If the test passed (return code is 0), update the UniqueOpTestInfo instance + # components datamember for all compiler component to True expect COMPILERCOMPONENT.UNKNOWN + logger.info(f"\tPassed ({elapsed_time:.2f} seconds)") + unique_op_test_info.update_compiler_components() + + # Handle timeout exceptions if the test exceeds the allowed 60-second time limit + except subprocess.TimeoutExpired as e: + elapsed_time = time.time() - start_time + + error_message = "Test timed out after 180 seconds" + unique_op_test_info.update_compiler_components(error_message) + + logger.info(f"\tFailed ({elapsed_time:.2f} seconds) due to {error_message}") + + if dump_failure_logs: + dump_logs(log_files, error_message) + + # Do WH warm reset (potentially hang occurred) + logger.info("\tWarm reset...") + os.system("/home/software/syseng/wh/tt-smi -lr all") + + # Handle other exceptions during unique op test execution + except subprocess.CalledProcessError as e: + elapsed_time = time.time() - start_time + logger.info(f"\tFailed ({elapsed_time:.2f} seconds)") + + error_message = "" + if e.stderr: + error_message += "STDERR: \n\n" + error_message += e.stderr + if e.stdout: + error_message += "STDOUT: \n\n" + error_message += e.stdout + + # Update the UniqueOpTestInfo instance components datamember status + # for each compiler component and error message in failure_reason datamember + unique_op_test_info.update_compiler_components(error_message) + + if dump_failure_logs: + dump_logs(log_files, error_message) + + # Handle unexpected exceptions + except Exception as ex: + elapsed_time = time.time() - start_time + error_message = ( + f"An unexpected error occurred while running {test}: {ex} ({elapsed_time:.2f} seconds)" + ) + unique_op_test_info.update_compiler_components(error_message) + logger.info(error_message) + + if dump_failure_logs: + dump_logs(log_files, error_message) + + # Update model details dictionary with variant name as key and ModelVariantInfo as values + for model_variant_info in model_variant_info_list: + if model_variant_info["variant_name"] in models_details.keys(): + models_details[model_variant_info["variant_name"]].unique_ops.append(unique_op_test_info) + else: + models_details[model_variant_info["variant_name"]] = ModelVariantInfo( + model_name=model_variant_info["model_name"], + 
variant_name=model_variant_info["variant_name"], + framework=model_variant_info["framework"], + unique_ops=[unique_op_test_info], + ) + + # Calculate and update the compiler support rates for each component for all the model variants + for variant_name, model_variant_info in models_details.items(): + for compiler_component in CompilerComponent: + compiler_component_passed_test_count = sum( + [ + int(unique_op_test_info.components[str(compiler_component.name)]) + for unique_op_test_info in model_variant_info.unique_ops + ] + ) + total_num_of_test = len(model_variant_info.unique_ops) + compiler_component_pass_percentage = ( + str(math.ceil((compiler_component_passed_test_count / total_num_of_test) * 100.0)) + " %" + ) + models_details[variant_name].update_support_rate(compiler_component, compiler_component_pass_percentage) + + models_details[variant_name].last_update_datetime = time.strftime("%A, %d %b %Y %I:%M:%S %p", time.gmtime()) + + return models_details + + +def generate_markdown( + markdown_file_name: str, + markdown_file_dir_path: str, + table_heading: str, + table_headers: Dict[str, List[str]], + table_rows: List[List[str]], +): + """ + Generates a Markdown file that contains an HTML table with the given headers and rows. + """ + # Create a markdown file for summarizing the results for all models in a single file + markdown_writer = MarkDownWriter(markdown_file_name, markdown_file_dir_path) + + # Write a heading for the HTML table + markdown_writer.write_html_table_heading(table_heading) + + # Generate and write the HTML table to the Markdown file + markdown_writer.create_html_table_and_write(headers=table_headers, rows=table_rows) + + # Close the Markdown file after writing the table + markdown_writer.close_file() + + +def create_root_and_sub_markdown_file(models_details, markdown_directory_path): + """ + Creates root and sub Markdown files summarizing the models and their unique operation test results. + + The root Markdown file contains an overview of all models and their compiler support rates. + The sub Markdown files contain detailed results for each model variant's unique operation tests. 
+ + """ + # Root markdown file name and directory path for saving the md file + root_markdown_file_name = "ModelsInfo" + root_markdown_directory_path = markdown_directory_path + + # HTML table heading for the root markdown and sub markdown files + root_markdown_table_heading = "List of models and current compiler support rates" + sub_markdown_table_heading = "Unique ops configuration and compiler support info" + + # List of compiler component names for table headers + compiler_component_names = [ + MarkDownWriter.get_component_names_for_header(compiler_component) for compiler_component in CompilerComponent + ] + + # HTML table header for the root markdown and sub markdown files + root_markdown_table_headers = { + "Model Details": ["Name", "Variant", "Framework"], + "Passing rate of unique ops for each component": compiler_component_names, + "Last update(in GMT)": ["Date & time"], + } + + sub_markdown_table_headers = { + "Operation Details": ["Name", "Operands", "Arguments"], + "Component Passing Check": compiler_component_names, + "Issues": ["Failure Reason"], + } + + root_markdown_table_rows = [] + + remove_directory(directory_path=os.path.join(markdown_directory_path, "Models")) + + # Iterate over model variants to generate sub markdown files and populate root markdown rows + for model_variant_info in models_details.values(): + + # Prepare the path for the sub markdown file to store test results for this model variant + sub_markdown_file_name = model_variant_info.variant_name + sub_markdown_directory_path = os.path.join(markdown_directory_path, "Models", model_variant_info.model_name) + + # List to store table rows for the sub markdown file + sub_markdown_table_rows = [] + + # Iterate over the unique operation test information to populate table rows for sub markdown + for unique_op_test_info in model_variant_info.unique_ops: + + table_data = [unique_op_test_info.op] + table_data.append("
<br> X <br>".join(unique_op_test_info.operands)) +                table_data.append("<br>
".join(unique_op_test_info.args)) + + # If unknown compiler component is set to True in unique_op_test_info, use the unknown symbol for indicating unknown compiler component status and for other compiler components set empty string + # else for unknown compiler component set empty string for indicating status and for other compiler component set pass or fail symbol + if unique_op_test_info.components[str(CompilerComponent.UNKNOWN.name)]: + for component_name, test_status in unique_op_test_info.components.items(): + test_status = ( + HtmlSymbol.UNKNOWN.value if component_name == str(CompilerComponent.UNKNOWN.name) else " " + ) + table_data.append(test_status) + else: + for component_name, test_status in unique_op_test_info.components.items(): + if component_name == str(CompilerComponent.UNKNOWN.name): + test_status = " " + else: + test_status = HtmlSymbol.PASS.value if test_status else HtmlSymbol.FAIL.value + table_data.append(test_status) + table_data.append(unique_op_test_info.failure_reason) + sub_markdown_table_rows.append(table_data) + + # Generate sub markdown file that contain model variant unique op test info + generate_markdown( + markdown_file_name=sub_markdown_file_name, + markdown_file_dir_path=sub_markdown_directory_path, + table_heading=sub_markdown_table_heading, + table_headers=sub_markdown_table_headers, + table_rows=sub_markdown_table_rows, + ) + + # Prepare a row for the root markdown file with summary details of the model variant + table_data = [model_variant_info.model_name] + + # Create an HTML link for the variant name, linking to its corresponding model variant markdown file + table_data.append( + MarkDownWriter.create_html_link( + model_variant_info.variant_name, + os.path.join("./Models", model_variant_info.model_name, model_variant_info.variant_name + ".md"), + ) + ) + + table_data.append(model_variant_info.framework) + for compiler_component in CompilerComponent: + table_data.append(model_variant_info.get_support_rate(compiler_component)) + table_data.append(model_variant_info.last_update_datetime) + root_markdown_table_rows.append(table_data) + + # Generate root markdown file that contain all the model variants result + generate_markdown( + markdown_file_name=root_markdown_file_name, + markdown_file_dir_path=root_markdown_directory_path, + table_heading=root_markdown_table_heading, + table_headers=root_markdown_table_headers, + table_rows=root_markdown_table_rows, + ) + + +def main(): + parser = argparse.ArgumentParser( + description="""Generate unique ops test for the models present in the test_directory_or_file_path + specified by the user and run the unique ops test and generate markdown files, the root markdown file contains model name, + variant name, framework and compiler components supported rate and sub-markdown file contains model variant unique op tests info""" + ) + + parser.add_argument( + "--test_directory_or_file_path", + type=str, + default=os.path.join(os.getcwd(), "forge/test/models"), + help="Specify the directory or file path containing models test", + ) + parser.add_argument( + "--dump_failure_logs", + action="store_true", + help="Specify the flag to dump the unique ops test failure logs.", + ) + parser.add_argument( + "--markdown_directory_path", + default=os.path.join(os.getcwd(), "model_analysis_docs"), + required=False, + help="Specify the directory path for saving models information as markdowns file", + ) + parser.add_argument( + "--unique_ops_output_directory_path", + default=os.path.join(os.getcwd(), "models_unique_ops_output"), + 
required=False, + help="Specify the output directory path for saving models unique op tests outputs(i.e failure logs, xlsx file)", + ) + + args = parser.parse_args() + + model_output_dir_paths = generate_and_export_unique_ops_tests( + test_directory_or_file_path=args.test_directory_or_file_path, + unique_ops_output_directory_path=args.unique_ops_output_directory_path, + ) + + unique_ops_config_across_all_models_file_path = os.path.join( + args.unique_ops_output_directory_path, "extracted_unique_op_config_across_all_models.log" + ) + unique_operations = extract_unique_op_tests_from_models( + model_output_dir_paths=model_output_dir_paths, + unique_ops_config_file_path=unique_ops_config_across_all_models_file_path, + ) + + models_details = run_models_unique_op_tests( + unique_operations=unique_operations, + unique_ops_output_directory_path=args.unique_ops_output_directory_path, + dump_failure_logs=args.dump_failure_logs, + ) + + create_root_and_sub_markdown_file( + models_details=models_details, markdown_directory_path=args.markdown_directory_path + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/model_analysis/unique_ops_utils.py b/scripts/model_analysis/unique_ops_utils.py new file mode 100644 index 000000000..9a7c10f38 --- /dev/null +++ b/scripts/model_analysis/unique_ops_utils.py @@ -0,0 +1,207 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +import os +import json +from loguru import logger +import subprocess +import pandas as pd +from typing import List +import ast + +import torch + +from forge.tvm_unique_op_generation import Operation, NodeType, UniqueOperations +from utils import dump_logs, collect_all_model_analysis_test + + +def generate_and_export_unique_ops_tests( + test_directory_or_file_path: str, unique_ops_output_directory_path: str, extract_tvm_unique_ops_config: bool = False +): + """ + Collect all the tests that doesn't contain skip_model_analysis marker in the test_directory_or_file_path specified by the user + and then generate unique op test for all the collected test and return the list of directory path + containing exported models unique op configuration as xlsx file + """ + + # Collect all the test that doesn't contain skip_model_analysis marker inside the test_directory_or_file_path specified by the user + test_list = collect_all_model_analysis_test(test_directory_or_file_path, unique_ops_output_directory_path) + + assert test_list != [], f"No tests found in the {test_directory_or_file_path} path" + + # Create a dictonary contains model_name as key and model tests(i.e include variant, task) as values + model_name_to_tests = {} + for test in test_list: + model_name = test.split("::")[0].split("/")[-1].replace(".py", "").replace("test_", "") + if model_name not in model_name_to_tests.keys(): + model_name_to_tests[model_name] = [test] + else: + model_name_to_tests[model_name].append(test) + + if extract_tvm_unique_ops_config: + pytest_argument = "--extract-tvm-unique-ops-config" + else: + pytest_argument = "--generate-unique-ops-tests" + + # Generate unique op test for the all collected test and save the models unique ops test information in the unique_ops_output_directory_path + model_output_dir_paths = [] + for model_name, tests in model_name_to_tests.items(): + model_output_dir_path = os.path.join(unique_ops_output_directory_path, model_name) + os.makedirs(model_output_dir_path, exist_ok=True) + model_output_dir_paths.append(model_output_dir_path) + for test in tests: + logger.info(f"Running the 
tests : {test}") + try: + result = subprocess.run( + ["pytest", test, "-vss", pytest_argument], + capture_output=True, + text=True, + check=True, + env=dict( + os.environ, + FORGE_DISABLE_REPORTIFY_DUMP="1", + FORGE_EXPORT_TVM_UNIQUE_OPS_CONFIG_DETAILS="1", + FORGE_EXPORT_TVM_UNIQUE_OPS_CONFIG_DETAILS_DIR_PATH=model_output_dir_path, + ), + ) + if result.returncode != 0: + logger.error(f"Error while running the pytest:\n stdout: {result.stdout}\n stderr: {result.stderr}") + else: + logger.info(f"Successfully generated and exported unique ops test") + + except subprocess.CalledProcessError as e: + logger.error(f"Error while running the pytest:\n {e.output}") + + return model_output_dir_paths + + +def extract_unique_op_tests_from_models( + model_output_dir_paths: List[str], unique_ops_config_file_path: str, use_constant_value: bool = True +): + """ + Extract unique op configuration across all the models which will avoid running the redudant + op configuration again by using the exported unique op configuration test details and models metadata + """ + + # Dictionary to store all the operations found in model variants + models_operations = {} + unique_op_count = 0 + + # Dictionary to store constants (name and tensor) used in the model variants + models_contants = {} + + # Iterate through all provided model directories + for model_output_dir_path in model_output_dir_paths: + + # Extract the model name from the directory path + model_name = model_output_dir_path.split("/")[-1] + + # List all model variants in the directory + model_variants = os.listdir(model_output_dir_path) + + # Process each model variant + for model_variant in model_variants: + + model_variant_dir_path = os.path.join(model_output_dir_path, model_variant) + + # Look for `.xlsx` and `.json` file containing unique operation details and metadata + model_variant_tvm_generated_unique_op_xslx_file_path = None + model_variant_tvm_generated_unique_op_metadata_file_path = None + for f in os.listdir(model_variant_dir_path): + if f.endswith(".xlsx"): + model_variant_tvm_generated_unique_op_xslx_file_path = os.path.join(model_variant_dir_path, f) + elif f.endswith(".json"): + model_variant_tvm_generated_unique_op_metadata_file_path = os.path.join(model_variant_dir_path, f) + + # Skip if either `.xlsx` or `.json` file is missing + if ( + model_variant_tvm_generated_unique_op_xslx_file_path is None + or model_variant_tvm_generated_unique_op_metadata_file_path is None + ): + continue + + # Read the `.xlsx` file contains model variant unique op configuration details + model_variant_df = pd.read_excel( + model_variant_tvm_generated_unique_op_xslx_file_path, + header=0, + usecols=[ + "Op", + "Operand_Names", + "Operand_Shapes", + "Operand_Types", + "Operand_Dtypes", + "Args", + "Testfile", + ], + ) + + # Read the `.json` file contains model variant metadata information + with open(model_variant_tvm_generated_unique_op_metadata_file_path, "r") as json_file: + model_variant_metadata = json.load(json_file) + + # Load model variants parameters and buffers as tensors from specified files + named_parameters = torch.load(model_variant_metadata["named_params_file_name"]) + if model_variant_metadata["param_file_name"] is not None: + serialized_params = torch.load(model_variant_metadata["param_file_name"]) + named_parameters.update(serialized_params) + named_buffers = torch.load(model_variant_metadata["named_buffers_file_name"]) + named_parameters.update(named_buffers) + + # Process each row in the `.xlsx` file to extract operation configurations + for 
index, row in model_variant_df.iterrows(): + row = row.to_dict() + unique_op_count += 1 + + operand_names = ast.literal_eval(row["Operand_Names"]) + operand_types = ast.literal_eval(row["Operand_Types"]) + operand_types = [NodeType.from_json(operand_type) for operand_type in operand_types] + operand_shapes = ast.literal_eval(row["Operand_Shapes"]) + + # Prepare metadata associated with the operation + metadata = {} + metadata["model_variant_info"] = {} + metadata["model_variant_info"]["model_name"] = model_name + metadata["model_variant_info"]["variant_name"] = model_variant_metadata["module_name"] + metadata["model_variant_info"]["framework"] = model_variant_metadata["framework"] + if not pd.isna(row["Testfile"]): + metadata["model_variant_info"]["Testfile"] = row["Testfile"] + + # Replace the contant node operand name with operand shape which can be extracted from the model parameters and buffers + if not use_constant_value: + new_operand_shapes = [] + for operand_type, operand_shape in zip(operand_types, operand_shapes): + if operand_type == NodeType.Constant: + if len(named_parameters[operand_shape].shape) == 0: + new_operand_shapes.append((torch.numel(named_parameters[operand_shape]),)) + else: + new_operand_shapes.append(tuple(named_parameters[operand_shape].shape)) + else: + new_operand_shapes.append(operand_shape) + operand_shapes = list(new_operand_shapes) + + # Create an Operation object with op name, shape, nodetype, dtype, arguments and operation metadata + models_operations[unique_op_count] = Operation( + function_name=row["Op"], + input_names=operand_names, + args=ast.literal_eval(row["Args"]), + input_shapes=operand_shapes, + input_dtypes=ast.literal_eval(row["Operand_Dtypes"]), + input_node_types=operand_types, + metadata=metadata, + ) + + if use_constant_value: + # Store tensor which has constant nodetype as operands + for operand_type, operand_name in zip(operand_types, operand_names): + if operand_type == NodeType.Constant: + models_contants[operand_name] = named_parameters[operand_name] + + # Extract unique operation configuration configuration across all the model variants + unique_operations = UniqueOperations.create_unique_operations( + models_operations, models_contants, use_constant_value=use_constant_value + ) + + # Dump the extracted unique op configuration across all the model varaiants into log file. + dump_logs(unique_ops_config_file_path, str(unique_operations)) + + return unique_operations diff --git a/scripts/model_analysis/utils.py b/scripts/model_analysis/utils.py new file mode 100644 index 000000000..29d9477ab --- /dev/null +++ b/scripts/model_analysis/utils.py @@ -0,0 +1,203 @@ +# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC +# +# SPDX-License-Identifier: Apache-2.0 +import os +from loguru import logger +import subprocess +from enum import IntEnum +from typing import Union, Dict, List, Tuple +import shutil + +import torch + + +class CompilerComponent(IntEnum): + FORGE = 0 + MLIR = 1 + TT_METAL = 2 + UNKNOWN = 4 + + +class MatchingExceptionRule: + """ + Represents a rule for matching exceptions based on specific tokens. + + Attributes: + rule_name (str): Name of the rule. + rule_tokens (List[str]): List of tokens to match in an exception message. + """ + + def __init__(self, rule_name: str, rule_tokens: List[str]): + self.rule_name = rule_name + self.rule_tokens = rule_tokens + + def match_rule(self, exception: str): + """ + Matches the rule tokens against the given exception string. + + Args: + exception (str): Exception message to match against. 
+ """ + # Check if all tokens in rule_tokens exist in the exception message and + # return the rule_token if matches otherwise return None + matched_token = all([True if token in exception else False for token in self.rule_tokens]) + if matched_token: + return " ".join(self.rule_tokens) + else: + return None + + +class MatchingCompilerComponentException: + """ + Represents exception matching for a specific compiler component. + + Attributes: + compiler_component (CompilerComponent): Compiler component associated with this exception matching. + exception_rules (List[MatchingExceptionRule]): List of exception rules for this component. + """ + + def __init__(self, compiler_component: CompilerComponent, exception_rules: List[MatchingExceptionRule]): + self.compiler_component = compiler_component + self.exception_rules = exception_rules + + def match_rule(self, exception: str): + """ + Matches the given exception against the exception rules of this compiler component. + Args: + exception (str): Exception message to be checked against the rules. + """ + # Iterate over all exception rules for this compiler component. + for rule in self.exception_rules: + # Attempt to match the current rule against the exception and If a match is found, + # return the compiler component and the constructed error message. + if rule.match_rule(exception) is not None: + match_err_msg = ( + f"[{self.compiler_component.name}] " + if rule.rule_name is None or rule.rule_name == "" + else f"[{self.compiler_component.name}][{rule.rule_name}] " + ) + match_err_msg += rule.match_rule(exception) + return self.compiler_component, match_err_msg + + return None, None + + +def check_path(directory_or_file_path: str): + + # Check if a file or directory exists, return True otherwise return False + if os.path.exists(directory_or_file_path): + logger.info(f"{directory_or_file_path} exists!") + return True + + logger.info(f"{directory_or_file_path} does not exist.") + return False + + +def create_python_package(package_path: str, package_name: str): + package_path = os.path.join(package_path, package_name) + if not check_path(package_path): + os.makedirs(package_path, exist_ok=True) + if not check_path(os.path.join(package_path, "__init__.py")): + try: + f = open(os.path.join(package_path, "__init__.py"), "x") + f.close() + logger.info(f"Created package in this path {package_path}") + except FileExistsError: + logger.info(f"__init__.py file already exists inside {package_path} path") + + +def dump_logs(log_files: Union[str, List[str]], content: str): + if isinstance(log_files, str): + log_files = [log_files] + for log_file in log_files: + log_file_dir_path = "/".join(log_file.split("/")[:-1]) + os.makedirs(log_file_dir_path, exist_ok=True) + with open(log_file, "w") as f: + f.write(content) + logger.info(f"Dumped test logs in {log_file}") + + +def collect_all_model_analysis_test(directory_or_file_path: str, output_directory_path: str): + """ + Collect all the tests that doesn't contains skip_model_analysis marker in a specified directory or file. 
+ """ + + # Ensure the directory or file path exists + assert check_path( + directory_or_file_path + ), f"The directory path for collecting test {directory_or_file_path} doesn't exists" + + logger.info( + f"Collecting all the tests that doesn't contains skip_model_analysis marker in {directory_or_file_path}" + ) + + collected_test_outputs = "" + try: + # Run pytest to collect tests with the `model_analysis` marker + result = subprocess.run( + ["pytest", directory_or_file_path, "-m", "not skip_model_analysis", "--collect-only"], + capture_output=True, + text=True, + check=True, + ) + + # Append stdout and stderr to the collected outputs + collected_test_outputs += "STDOUT:\n" + result.stdout + collected_test_outputs += "STDERR:\n" + result.stderr + + except subprocess.CalledProcessError as e: + collected_test_outputs += e.output + + # Save the collected test outputs to a file + collected_test_file_path = os.path.join(output_directory_path, "collected_tests.txt") + dump_logs(collected_test_file_path, collected_test_outputs) + + # Extract tests from the collected test outputs + test_list = [] + with open(collected_test_file_path, "r") as collected_test_file: + lines = collected_test_file.readlines() + test_lines = False + for line in lines: + if "Automatic Model Analysis Collected tests:" in line: + test_lines = True + elif "Automatic Model Analysis Collected test count:" in line: + test_lines = False + break + elif test_lines: + test_list.append(str(line).replace("\n", "")) + + return test_list + + +def run_command(command: str): + command_outputs = "" + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + check=True, + ) + + # Append stdout and stderr to the command outputs + command_outputs += result.stdout + result.stderr + + except subprocess.CalledProcessError as e: + command_outputs += e.output + + logger.info(f"Running the {command}\n{command_outputs}") + + +def run_precommit(directory_path: str): + run_command("pip install pre-commit") + run_command(f"pre-commit run --files $(find {directory_path} -type f)") + + +def remove_directory(directory_path: str): + if check_path(directory_path): + try: + shutil.rmtree(directory_path) + print(f"The directory path '{directory_path}' and its contents have been removed.") + except Exception as e: + print(f"An error occurred while removing the directory path {directory_path}: {e}")
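Taken together, the new scripts/model_analysis package replaces the old monolithic script with importable modules (exception_rules, markdown, unique_ops_utils, utils) plus two thin entry points. The short sketches below are illustrative only: they assume the repo's Python environment (torch, loguru, pandas, tabulate installed) and that scripts/model_analysis is added to sys.path, since the modules import each other as top-level names (from utils import ...); the sample inputs and output paths are made up.

A minimal sketch of how a captured pytest error log is attributed to a compiler component with the rules from exception_rules.py:

import sys

sys.path.insert(0, "scripts/model_analysis")  # assumption: run from the repo root

from exception_rules import common_failure_matching_rules_list

# Hypothetical stderr captured from a failing unique-op test.
error_log = "RuntimeError: Tensor 1 - data type mismatch: expected BFloat16, got Float32"

for component_rules in common_failure_matching_rules_list:
    compiler_component, failure_reason = component_rules.match_rule(error_log)
    if compiler_component is not None:
        # For this message the FORGE "Runtime Datatype mismatch" rule matches first.
        print(compiler_component.name, "->", failure_reason)
        break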
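That attribution feeds UniqueOpTestInfo.update_compiler_components, which leans on CompilerComponent being an IntEnum: every stage ordered before the one blamed for the failure is marked as passing. A self-contained sketch of that ordering convention (enum values mirror scripts/model_analysis/utils.py):

from enum import IntEnum


class CompilerComponent(IntEnum):
    FORGE = 0
    MLIR = 1
    TT_METAL = 2
    UNKNOWN = 4


def stages_marked_passing(matched: CompilerComponent) -> list:
    # Components strictly less than the matched one are treated as passing.
    return [component.name for component in CompilerComponent if component < matched]


print(stages_marked_passing(CompilerComponent.TT_METAL))  # ['FORGE', 'MLIR']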
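On the reporting side, a small sketch of how MarkDownWriter (scripts/model_analysis/markdown.py) turns a grouped header dictionary into the nested HTML table used for the root ModelsInfo summary; the file name, directory, and row values here are placeholders:

import sys

sys.path.insert(0, "scripts/model_analysis")  # assumption: run from the repo root

from markdown import MarkDownWriter

writer = MarkDownWriter("ExampleSummary", "./example_docs")
writer.write_html_table_heading("Example compiler support table")
writer.create_html_table_and_write(
    # Each top-level group spans its sub-headers; every row needs one cell per sub-header.
    headers={"Model Details": ["Name", "Variant"], "Passing rate": ["Forge-Fe", "MLIR"]},
    rows=[["mnist", "mnist_base", "100 %", "100 %"]],
)
writer.close_file()  # writes ./example_docs/ExampleSummary.md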
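Finally, both entry points are plain argparse scripts, so regenerating the models ops tests can be scripted directly; the invocation below simply mirrors the argparse defaults declared in generate_models_ops_test.py:

import subprocess

subprocess.run(
    [
        "python",
        "scripts/model_analysis/generate_models_ops_test.py",
        "--test_directory_or_file_path", "forge/test/models",
        "--unique_ops_output_directory_path", "./models_unique_ops_output",
        "--models_ops_test_output_directory_path", "forge/test",
        "--models_ops_test_package_name", "models_ops",
    ],
    check=True,
)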