Skip to content

Commit 35410e9

Browse files
echarlaix authored and l-bat committed
remove unused function
1 parent ce838e3 commit 35410e9

File tree

3 files changed

+3
-41
lines changed

3 files changed

+3
-41
lines changed

optimum/commands/export/openvino.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ def parse_args(parser: "ArgumentParser"):
161161
return parse_args_openvino(parser)
162162

163163
def run(self):
164-
from ...exporters.openvino.__main__ import main_export, get_relevant_task, export_optimized_diffusion_model
164+
from ...exporters.openvino.__main__ import main_export
165165
from ...intel.openvino.configuration import _DEFAULT_4BIT_CONFIGS, OVConfig
166166

167167
if self.args.fp16:

optimum/exporters/openvino/__main__.py

+1-38
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ def main_export(
161161
ov_config = OVConfig(quantization_config=q_config)
162162

163163
original_task = task
164-
task = get_relevant_task(task, model_name_or_path)
164+
task = TasksManager.map_from_synonym(task)
165165
framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, framework=framework)
166166
library_name_is_not_provided = library_name is None
167167
library_name = TasksManager.infer_library_from_model(
@@ -376,40 +376,3 @@ class StoreAttr(object):
376376
if do_gptq_patching:
377377
torch.cuda.is_available = orig_cuda_check
378378
GPTQQuantizer.post_init_model = orig_post_init_model
379-
380-
381-
def get_relevant_task(task, model_name_or_path):
382-
relevant_task = TasksManager.map_from_synonym(task)
383-
if relevant_task == "auto":
384-
try:
385-
relevant_task = TasksManager.infer_task_from_model(model_name_or_path)
386-
except KeyError as e:
387-
raise KeyError(
388-
f"The task could not be automatically inferred. Please provide the argument --task with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}"
389-
)
390-
except RequestsConnectionError as e:
391-
raise RequestsConnectionError(
392-
f"The task could not be automatically inferred as this is available only for models hosted on the Hugging Face Hub. Please provide the argument --task with the relevant task from {', '.join(TasksManager.get_all_tasks())}. Detailed error: {e}"
393-
)
394-
return relevant_task
395-
396-
397-
def export_optimized_diffusion_model(model_name_or_path, output, task, quantization_config):
398-
task = get_relevant_task(task, model_name_or_path)
399-
if task == "latent-consistency":
400-
from optimum.intel import OVLatentConsistencyModelPipeline
401-
402-
model_cls = OVLatentConsistencyModelPipeline
403-
elif task == "stable-diffusion-xl":
404-
from optimum.intel import OVStableDiffusionXLPipeline
405-
406-
model_cls = OVStableDiffusionXLPipeline
407-
elif task == "stable-diffusion":
408-
from optimum.intel import OVStableDiffusionPipeline
409-
410-
model_cls = OVStableDiffusionPipeline
411-
else:
412-
raise NotImplementedError(f"Quantization in hybrid mode isn't supported for {task}.")
413-
414-
model = model_cls.from_pretrained(model_id=model_name_or_path, quantization_config=quantization_config)
415-
model.save_pretrained(output)

tests/openvino/test_exporters_cli.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -187,8 +187,7 @@ def test_exporters_cli_int8(self, task: str, model_type: str):
187187
def test_exporters_cli_hybrid_quantization(self, model_type: str, exp_num_fq: int, exp_num_int8: int):
188188
with TemporaryDirectory() as tmpdir:
189189
subprocess.run(
190-
f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} "
191-
f"--task {model_type} --dataset laion/filtered-wit --weight-format int8 {tmpdir}",
190+
f"optimum-cli export openvino --model {MODEL_NAMES[model_type]} --dataset laion/filtered-wit --weight-format int8 {tmpdir}",
192191
shell=True,
193192
check=True,
194193
)

0 commit comments

Comments (0)