
Commit 98fbee8

Merge branch 'main' into bump-transformers-version
2 parents 8ea90db + 64d9a49 commit 98fbee8

2 files changed: +0 −63 lines

optimum/intel/neural_compressor/modeling_base.py (−6 lines)
@@ -35,7 +35,6 @@
     GenerationConfig,
     GenerationMixin,
     PretrainedConfig,
-    XLNetLMHeadModel,
 )
 from transformers.modeling_utils import no_init_weights
 from transformers.models.auto.auto_factory import _get_model_class
@@ -249,11 +248,6 @@ class INCModelForVision2Seq(INCModel):
     export_feature = "image-to-text"
 
 
-class INCModelForXLNetLM(INCModel):
-    auto_model_class = XLNetLMHeadModel
-    export_feature = "fill-mask"
-
-
 class INCModelForCausalLM(INCModel, BaseModelForCausalLM):
     auto_model_class = AutoModelForCausalLM
     export_feature = "text-generation"
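
For context, INCModelForCausalLM (kept by this diff, with export_feature = "text-generation") is loaded through the usual from_pretrained entry point. A minimal sketch, assuming optimum.intel re-exports the class and using a hypothetical example checkpoint, not code from this commit:

# Minimal usage sketch; "distilgpt2" is a hypothetical example checkpoint.
from transformers import AutoTokenizer
from optimum.intel import INCModelForCausalLM

model_id = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = INCModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Quantization with neural-compressor", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))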

optimum/intel/neural_compressor/quantization.py (−57 lines)
@@ -15,7 +15,6 @@
 import copy
 import inspect
 import logging
-import warnings
 from enum import Enum
 from itertools import chain
 from pathlib import Path
@@ -31,18 +30,9 @@
 from neural_compressor.quantization import fit
 from torch.utils.data import DataLoader, RandomSampler
 from transformers import (
-    AutoModelForCausalLM,
-    AutoModelForMaskedLM,
-    AutoModelForMultipleChoice,
-    AutoModelForQuestionAnswering,
-    AutoModelForSeq2SeqLM,
-    AutoModelForSequenceClassification,
-    AutoModelForTokenClassification,
-    AutoModelForVision2Seq,
     DataCollator,
     PretrainedConfig,
     PreTrainedModel,
-    XLNetLMHeadModel,
     default_data_collator,
 )
 
@@ -71,7 +61,6 @@
     INCModelForSequenceClassification,
     INCModelForTokenClassification,
     INCModelForVision2Seq,
-    INCModelForXLNetLM,
 )
 from .utils import INCDataLoader, _cfgs_to_fx_cfgs
 
@@ -538,49 +527,3 @@ def _apply_quantization_from_config(q_config: Dict, model: torch.nn.Module) -> t
     q_model = convert(q_model, mapping=q_mapping, inplace=True)
 
     return q_model
-
-
-class IncQuantizedModel(INCModel):
-    @classmethod
-    def from_pretrained(cls, *args, **kwargs):
-        warnings.warn(
-            f"The class `{cls.__name__}` has been depreciated and will be removed in optimum-intel v1.12, please use "
-            f"`{cls.__name__.replace('IncQuantized', 'INC')}` instead."
-        )
-        return super().from_pretrained(*args, **kwargs)
-
-
-class IncQuantizedModelForQuestionAnswering(IncQuantizedModel):
-    auto_model_class = AutoModelForQuestionAnswering
-
-
-class IncQuantizedModelForSequenceClassification(IncQuantizedModel):
-    auto_model_class = AutoModelForSequenceClassification
-
-
-class IncQuantizedModelForTokenClassification(IncQuantizedModel):
-    auto_model_class = AutoModelForTokenClassification
-
-
-class IncQuantizedModelForMultipleChoice(IncQuantizedModel):
-    auto_model_class = AutoModelForMultipleChoice
-
-
-class IncQuantizedModelForSeq2SeqLM(IncQuantizedModel):
-    auto_model_class = AutoModelForSeq2SeqLM
-
-
-class IncQuantizedModelForCausalLM(IncQuantizedModel):
-    auto_model_class = AutoModelForCausalLM
-
-
-class IncQuantizedModelForMaskedLM(IncQuantizedModel):
-    auto_model_class = AutoModelForMaskedLM
-
-
-class IncQuantizedModelForXLNetLM(IncQuantizedModel):
-    auto_model_class = XLNetLMHeadModel
-
-
-class IncQuantizedModelForVision2Seq(IncQuantizedModel):
-    auto_model_class = AutoModelForVision2Seq
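
The removed IncQuantized* wrappers only forwarded from_pretrained while emitting a deprecation warning that pointed at their INC-prefixed equivalents, so downstream code migrates by renaming the class. A rough sketch, assuming the INC* classes are re-exported from optimum.intel and using a hypothetical checkpoint path:

# Before (wrapper removed in this commit, previously emitted a deprecation warning):
# from optimum.intel.neural_compressor.quantization import IncQuantizedModelForSequenceClassification
# model = IncQuantizedModelForSequenceClassification.from_pretrained("path/to/quantized-model")

# After: use the INC-prefixed class directly ("path/to/quantized-model" is hypothetical).
from optimum.intel import INCModelForSequenceClassification

model = INCModelForSequenceClassification.from_pretrained("path/to/quantized-model")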
