Skip to content

Commit eddccab

Browse files
added hidden states test
1 parent 8bf40fa commit eddccab

File tree

2 files changed

+52
-9
lines changed

2 files changed

+52
-9
lines changed

tests/openvino/test_modeling.py

+51-9
Original file line numberDiff line numberDiff line change
@@ -1529,21 +1529,30 @@ def test_pipeline_image_to_text(self, model_arch: str):
15291529

15301530

15311531
class OVModelForCustomTasksIntegrationTest(unittest.TestCase):
1532-
SUPPORTED_ARCHITECTURES = ["vit-with-attentions"]
1532+
SUPPORTED_ARCHITECTURES_WITH_ATTENTION = ["vit-with-attentions"]
1533+
SUPPORTED_ARCHITECTURES_WITH_HIDDEN_STATES = ["vit-with-hidden-states"]
15331534

1534-
@parameterized.expand(SUPPORTED_ARCHITECTURES)
1535-
def test_compare_to_transformers(self, model_arch):
1536-
model_id = MODEL_NAMES[model_arch]
1537-
set_seed(SEED)
1538-
ov_model = OVModelForCustomTasks.from_pretrained(model_id, ov_config=F32_CONFIG)
1539-
self.assertIsInstance(ov_model.config, PretrainedConfig)
1540-
transformers_model = AutoModelForImageClassification.from_pretrained(model_id)
1541-
preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
1535+
def _get_sample_image(self):
    """Fetch the shared COCO sample image (two cats) used by these integration tests."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Stream the response and hand the raw file object straight to PIL.
    return Image.open(requests.get(url, stream=True).raw)
1539+
1540+
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_ATTENTION)
1541+
def test_compare_output_attentions(self, model_arch):
1542+
model_id = MODEL_NAMES[model_arch]
1543+
1544+
image = self._get_sample_image()
1545+
preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
15441546
inputs = preprocessor(images=image, return_tensors="pt")
1547+
1548+
transformers_model = AutoModelForImageClassification.from_pretrained(model_id)
1549+
transformers_model.eval()
15451550
with torch.no_grad():
15461551
transformers_outputs = transformers_model(**inputs, output_attentions=True)
1552+
1553+
ov_model = OVModelForCustomTasks.from_pretrained(model_id, ov_config=F32_CONFIG)
1554+
self.assertIsInstance(ov_model.config, PretrainedConfig)
1555+
15471556
for input_type in ["pt", "np"]:
15481557
inputs = preprocessor(images=image, return_tensors=input_type)
15491558
ov_outputs = ov_model(**inputs)
@@ -1561,3 +1570,36 @@ def test_compare_to_transformers(self, model_arch):
15611570
del transformers_model
15621571
del ov_model
15631572
gc.collect()
1573+
1574+
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HIDDEN_STATES)
def test_compare_output_hidden_states(self, model_arch):
    """Check that OVModelForCustomTasks exposes hidden states matching transformers.

    Runs the reference transformers model with ``output_hidden_states=True`` and
    compares logits and every hidden-state tensor against the OpenVINO export,
    for both "pt" and "np" preprocessor output types.
    """
    model_id = MODEL_NAMES[model_arch]

    image = self._get_sample_image()
    preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
    inputs = preprocessor(images=image, return_tensors="pt")

    transformers_model = AutoModelForImageClassification.from_pretrained(model_id)
    transformers_model.eval()
    with torch.no_grad():
        transformers_outputs = transformers_model(**inputs, output_hidden_states=True)

    ov_model = OVModelForCustomTasks.from_pretrained(model_id, ov_config=F32_CONFIG)
    self.assertIsInstance(ov_model.config, PretrainedConfig)

    for input_type in ["pt", "np"]:
        inputs = preprocessor(images=image, return_tensors=input_type)
        ov_outputs = ov_model(**inputs)
        self.assertIn("logits", ov_outputs)
        self.assertIsInstance(ov_outputs.logits, TENSOR_ALIAS_TO_TYPE[input_type])
        self.assertTrue(torch.allclose(torch.Tensor(ov_outputs.logits), transformers_outputs.logits, atol=1e-4))
        # Guard against a vacuous/partial pass: the comparison loop below only
        # iterates over the OV outputs, so first require that hidden states are
        # present and that both models emit the same number of layers.
        self.assertIn("hidden_states", ov_outputs)
        self.assertEqual(len(ov_outputs.hidden_states), len(transformers_outputs.hidden_states))
        self.assertTrue(
            all(
                torch.allclose(
                    torch.Tensor(ov_outputs.hidden_states[i]), transformers_outputs.hidden_states[i], atol=1e-4
                )
                for i in range(len(ov_outputs.hidden_states))
            )
        )

    del transformers_model
    del ov_model
    gc.collect()

tests/openvino/utils_tests.py

+1
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,7 @@
101101
"unispeech_sat": "hf-internal-testing/tiny-random-UnispeechSatModel",
102102
"vit": "hf-internal-testing/tiny-random-vit",
103103
"vit-with-attentions": "IlyasMoutawwakil/vit-with-attentions",
104+
"vit-with-hidden-states": "IlyasMoutawwakil/vit-with-hidden_states",
104105
"vision-encoder-decoder": "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2",
105106
"wavlm": "hf-internal-testing/tiny-random-WavlmModel",
106107
"wav2vec2": "anton-l/wav2vec2-random-tiny-classifier",

0 commit comments

Comments
 (0)