@@ -19,26 +19,22 @@
 import itertools
 import logging
 import unittest
-from collections import defaultdict, Iterable
+from collections import defaultdict
+from collections.abc import Iterable
 from enum import Enum
 from functools import partial
 from typing import Union, Type
 
 import openvino as ov
 import pytest
-import evaluate
 import numpy as np
 import torch
-from datasets import load_dataset
 from parameterized import parameterized
 import nncf
 from transformers import (
     AutoModelForQuestionAnswering,
-    AutoModelForSequenceClassification,
     AutoTokenizer,
     AutoProcessor,
-    TrainingArguments,
-    default_data_collator,
 )
 from transformers.testing_utils import slow
 from transformers.utils.quantization_config import QuantizationMethod
@@ -234,6 +230,60 @@ class OVQuantizerTest(unittest.TestCase):
                 {"f8e5m2": 2, "int4": 28},
             ],
         ),
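+        # New diffusion-pipeline cases. As in the entries above, the two trailing lists appear to
+        # hold the expected fake-quantize node count and weight dtype counts per pipeline submodel.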
+        (
+            OVStableDiffusionPipeline,
+            "stable-diffusion",
+            dict(
+                weight_only=False,
+                dataset="conceptual_captions",
+                num_samples=1,
+                processor=MODEL_NAMES["stable-diffusion"],
+                trust_remote_code=True,
+            ),
+            [
+                112, 0, 0, 0,
+            ],
+            [
+                {"int8": 121}, {"int8": 42}, {"int8": 34}, {"int8": 64},
+            ],
+        ),
+        (
+            OVStableDiffusionXLPipeline,
+            "stable-diffusion-xl",
+            dict(
+                weight_only=False,
+                dtype="f8e5m2",
+                dataset="laion/220k-GPT4Vision-captions-from-LIVIS",
+                num_samples=1,
+                processor=MODEL_NAMES["stable-diffusion-xl"],
+                trust_remote_code=True,
+            ),
+            [
+                174, 0, 0, 0, 0,
+            ],
+            [
+                {"f8e5m2": 183}, {"int8": 42}, {"int8": 34}, {"int8": 64}, {"int8": 66},
+            ],
+        ),
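+        # Unlike the dict-based entries above, this case passes an OVQuantizationConfig instance directly.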
+        (
+            OVLatentConsistencyModelPipeline,
+            "latent-consistency",
+            OVQuantizationConfig(
+                dtype="f8e4m3",
+                dataset="laion/filtered-wit",
+                num_samples=1,
+                trust_remote_code=True,
+            ),
+            [
+                79, 0, 0, 0,
+            ],
+            [
+                {"f8e4m3": 84}, {"int8": 42}, {"int8": 34}, {"int8": 40},
+            ],
+        ),
     ]
 
     @parameterized.expand(SUPPORTED_ARCHITECTURES_TORCH_MODEL)
@@ -359,6 +409,9 @@ def test_ov_model_static_quantization_with_auto_dataset(
             tokens = tokenizer("This is a sample input", return_tensors="pt")
             outputs = ov_model(**tokens)
             self.assertTrue("logits" in outputs)
+        elif model_cls in (OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline):
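+            # No single forward call to smoke-test here; the pipeline submodels are inspected instead.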
+            submodels = ov_model.ov_submodels.values()
         else:
             raise Exception("Unexpected model class.")
 