Skip to content

Commit

Permalink
Update optimum/exporters/openvino/model_configs.py
Browse files Browse the repository at this point in the history
  • Loading branch information
eaidova committed Dec 20, 2024
1 parent f6fc95d commit afbac57
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 11 deletions.
12 changes: 5 additions & 7 deletions optimum/exporters/openvino/model_configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -1785,21 +1785,19 @@ def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int

class DummyUnetVisionInputGenerator(DummyVisionInputGenerator):
    """Vision input generator for UNet export that shrinks the spatial size of the sample input."""

    def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int64", float_dtype: str = "fp32"):
        """Generate a dummy tensor for ``input_name``.

        For the UNet ``sample`` / ``latent_sample`` inputs, height and width are
        reduced by one so the exported model is traced with non-default spatial
        sizes and can therefore run at any resolution. Every other input falls
        back to the parent generator unchanged.
        """
        if input_name not in ["sample", "latent_sample"]:
            return super().generate(input_name, framework, int_dtype, float_dtype)
        # Reduce height and width by 1 to enable generation at any resolution.
        return self.random_float_tensor(
            shape=[self.batch_size, self.num_channels, self.height - 1, self.width - 1],
            framework=framework,
            dtype=float_dtype,
        )


@register_in_tasks_manager("unet", "semantic-segmentation", library_name="diffusers")
class UnetOpenVINOConfig(UNetOnnxConfig):
    """OpenVINO export config for UNet using the any-resolution dummy vision input generator."""

    # Replace the first (vision) dummy input generator with the variant that
    # shrinks height/width by one, so exported models keep dynamic spatial axes.
    DUMMY_INPUT_GENERATOR_CLASSES = (DummyUnetVisionInputGenerator,) + UNetOnnxConfig.DUMMY_INPUT_GENERATOR_CLASSES[1:]


@register_in_tasks_manager("sd3-transformer", *["semantic-segmentation"], library_name="diffusers")
Expand Down
6 changes: 2 additions & 4 deletions tests/openvino/test_diffusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ def test_compare_to_diffusers_pipeline(self, model_arch: str):
diffusers_output = diffusers_pipeline(**inputs, generator=get_generator("pt", SEED)).images

np.testing.assert_allclose(ov_output, diffusers_output, atol=6e-3, rtol=1e-2)

# test on inputs whose size is not divisible by 64
height, width, batch_size = 96, 96, 1

Expand All @@ -155,7 +155,6 @@ def test_compare_to_diffusers_pipeline(self, model_arch: str):

np.testing.assert_allclose(ov_output, diffusers_output, atol=6e-3, rtol=1e-2)


@parameterized.expand(CALLBACK_SUPPORT_ARCHITECTURES)
@require_diffusers
def test_callback(self, model_arch: str):
Expand Down Expand Up @@ -802,7 +801,7 @@ def test_compare_to_diffusers_pipeline(self, model_arch: str):
diffusers_output = diffusers_pipeline(**inputs, generator=get_generator("pt", SEED)).images

np.testing.assert_allclose(ov_output, diffusers_output, atol=6e-3, rtol=1e-2)

# test generation when the input resolution is not divisible by 64
height, width, batch_size = 96, 96, 1
inputs = self.generate_inputs(height=height, width=width, batch_size=batch_size)
Expand All @@ -815,7 +814,6 @@ def test_compare_to_diffusers_pipeline(self, model_arch: str):

np.testing.assert_allclose(ov_output, diffusers_output, atol=6e-3, rtol=1e-2)


@parameterized.expand(SUPPORTED_ARCHITECTURES)
@require_diffusers
def test_image_reproducibility(self, model_arch: str):
Expand Down

0 comments on commit afbac57

Please sign in to comment.