From cb15e6e6bb98da4c82d3f8713566679de30031ea Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 15 Jan 2025 23:20:53 +0400 Subject: [PATCH] Static pipeline: use default properties when run on CPU (#1551) It allows minimizing the dependency on results produced by the LLMPipeline backend (e.g. stateful vs CB) --- tests/python_tests/test_llm_pipeline_static.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/python_tests/test_llm_pipeline_static.py b/tests/python_tests/test_llm_pipeline_static.py index d2d3673356..ae5c475fd9 100644 --- a/tests/python_tests/test_llm_pipeline_static.py +++ b/tests/python_tests/test_llm_pipeline_static.py @@ -17,7 +17,6 @@ from common import \ get_greedy, \ get_greedy_with_penalties, \ - get_multinomial_temperature, \ get_multinomial_all_parameters, \ get_multinomial_temperature_and_presence_penalty, \ get_beam_search @@ -34,7 +33,7 @@ 'NPUW_ONLINE_PIPELINE': 'NONE', 'PREFILL_CONFIG': { }, 'GENERATE_CONFIG': { } - } + } | get_default_properties() def generate_chat_history(model_path, device, pipeline_config, questions): @@ -56,7 +55,7 @@ def test_generation_compare_with_stateful(generation_config): prompt = 'What is OpenVINO?' model_path = read_model(get_models_list()[0])[1] - stateful_pipe = ov_genai.LLMPipeline(model_path, "CPU") + stateful_pipe = ov_genai.LLMPipeline(model_path, "CPU", **get_default_properties()) ref_out = stateful_pipe.generate(prompt, generation_config) static_pipe = ov_genai.LLMPipeline(model_path, "NPU", **common_config)