@@ -19,8 +19,8 @@
 from typing import TYPE_CHECKING, Optional
 
 from ...exporters import TasksManager
-from ..base import BaseOptimumCLICommand, CommandInfo
 from ...intel.utils.import_utils import DIFFUSERS_IMPORT_ERROR, is_diffusers_available
+from ..base import BaseOptimumCLICommand, CommandInfo
 
 
 logger = logging.getLogger(__name__)
@@ -212,7 +212,6 @@ def run(self):
         library_name = TasksManager.infer_library_from_model(self.args.model)
 
         if library_name == "diffusers" and ov_config and ov_config.quantization_config.get("dataset"):
-
             if not is_diffusers_available():
                 raise ValueError(DIFFUSERS_IMPORT_ERROR.format("Export of diffusers models"))
 
@@ -222,13 +221,11 @@ def run(self):
             class_name = diffusers_config.get("_class_name", None)
 
             if class_name == "LatentConsistencyModelPipeline":
-
                 from optimum.intel import OVLatentConsistencyModelPipeline
 
                 model_cls = OVLatentConsistencyModelPipeline
 
             elif class_name == "StableDiffusionXLPipeline":
-
                 from optimum.intel import OVStableDiffusionXLPipeline
 
                 model_cls = OVStableDiffusionXLPipeline
@@ -239,7 +236,9 @@ def run(self):
             else:
                 raise NotImplementedError(f"Quantization in hybrid mode isn't supported for class {class_name}.")
 
-            model = model_cls.from_pretrained(self.args.model, export=True, quantization_config=ov_config.quantization_config)
+            model = model_cls.from_pretrained(
+                self.args.model, export=True, quantization_config=ov_config.quantization_config
+            )
             model.save_pretrained(self.args.output)
 
         else:
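For context, the reflowed `from_pretrained` call sits in the hybrid-quantization path for diffusers pipelines: the command picks an `OV*Pipeline` class from the pipeline's `_class_name`, exports it with the requested quantization config, and saves the result to the output directory. The snippet below is a minimal sketch of driving the same path directly from Python; the model ID and the contents of the quantization dict are illustrative placeholders, not values taken from this diff.

```python
# Minimal sketch of the code path touched by this diff, assuming optimum-intel
# with the OpenVINO extras is installed. The model ID and quantization settings
# below are illustrative placeholders.
from optimum.intel import OVStableDiffusionXLPipeline

# Mirrors ov_config.quantization_config in run(): a dict-like config whose
# "dataset" key is what routes the CLI into the hybrid-quantization branch.
quantization_config = {"bits": 8, "dataset": "conceptual_captions"}

# Equivalent of:
#   model_cls.from_pretrained(self.args.model, export=True,
#                             quantization_config=ov_config.quantization_config)
model = OVStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    export=True,
    quantization_config=quantization_config,
)

# Equivalent of: model.save_pretrained(self.args.output)
model.save_pretrained("sdxl_ov_int8")
```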