Commit

Fix default ov config (#600)
echarlaix authored Mar 11, 2024
1 parent 5c683a3 commit 36459a1
Showing 5 changed files with 9 additions and 9 deletions.
4 changes: 2 additions & 2 deletions optimum/intel/openvino/modeling.py
@@ -434,8 +434,8 @@ def _from_transformers(
         save_dir = TemporaryDirectory()
         save_dir_path = Path(save_dir.name)
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
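
The same or-to-and change is applied to each file below. As a minimal sketch of the corrected selection logic (illustrative only, not the actual _from_transformers body; the OVConfig import path is assumed):

# Minimal sketch of the corrected default-config selection (illustrative,
# not the real method); assumes OVConfig is importable from optimum.intel.
from optimum.intel import OVConfig

def select_ov_config(load_in_8bit=None, quantization_config=None):
    # Leave the config unset only when the user specified neither option,
    # so the converter can pick its size-dependent default (per the diff comment).
    if load_in_8bit is None and not quantization_config:
        return None
    # Any explicit choice (e.g. load_in_8bit=False) now forces a plain fp32 export;
    # the old `or` condition let such a call fall through to the default instead.
    return OVConfig(dtype="fp32")

print(select_ov_config())                    # None -> converter picks the default
print(select_ov_config(load_in_8bit=False))  # fp32 config -> no implicit 8-bit default
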
4 changes: 2 additions & 2 deletions optimum/intel/openvino/modeling_base.py
@@ -314,8 +314,8 @@ def _from_transformers(
         save_dir = TemporaryDirectory()
         save_dir_path = Path(save_dir.name)
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
4 changes: 2 additions & 2 deletions optimum/intel/openvino/modeling_base_seq2seq.py
@@ -258,8 +258,8 @@ def _from_transformers(
         if use_cache:
             task = task + "-with-past"
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
2 changes: 1 addition & 1 deletion optimum/intel/openvino/modeling_decoder.py
@@ -260,7 +260,7 @@ def _from_transformers(
         if use_cache:
             task = task + "-with-past"
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
         if load_in_8bit is None and not quantization_config:
             ov_export_config = None
         else:
4 changes: 2 additions & 2 deletions optimum/intel/openvino/modeling_diffusion.py
@@ -321,8 +321,8 @@ def _from_transformers(
         save_dir = TemporaryDirectory()
         save_dir_path = Path(save_dir.name)
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
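
For context, an illustrative usage sketch of how the two branches are reached from the export API (the model ID is just a placeholder, and whether the size-based default actually kicks in depends on the model, per the comments in the diffs above):

# Illustrative usage only; behavior follows the corrected condition above.
from optimum.intel import OVModelForCausalLM

# Neither load_in_8bit nor quantization_config given: ov_config stays None,
# so the converter may apply its size-dependent default during export.
model_default = OVModelForCausalLM.from_pretrained("gpt2", export=True)

# An explicit opt-out now takes the fp32 branch instead of falling back to the
# size-based default; an explicit quantization_config likewise forces the fp32
# export path before the requested quantization is applied.
model_fp32 = OVModelForCausalLM.from_pretrained("gpt2", export=True, load_in_8bit=False)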
