Skip to content

Commit a2ab86d

Browse files
committed
make style
Signed-off-by: Issam Arabi <issam@cs.toronto.edu>
1 parent c9711b8 commit a2ab86d

File tree

2 files changed

+19
-16
lines changed

2 files changed

+19
-16
lines changed

optimum/bettertransformer/models/encoder_models.py

+6-14
Original file line numberDiff line numberDiff line change
@@ -1251,16 +1251,8 @@ def __init__(self, detr_layer, config):
12511251
self.norm_first = True
12521252

12531253
self.original_layers_mapping = {
1254-
"in_proj_weight": [
1255-
"self_attn.q_proj.weight",
1256-
"self_attn.k_proj.weight",
1257-
"self_attn.v_proj.weight"
1258-
],
1259-
"in_proj_bias": [
1260-
"self_attn.q_proj.bias",
1261-
"self_attn.k_proj.bias",
1262-
"self_attn.v_proj.bias"
1263-
],
1254+
"in_proj_weight": ["self_attn.q_proj.weight", "self_attn.k_proj.weight", "self_attn.v_proj.weight"],
1255+
"in_proj_bias": ["self_attn.q_proj.bias", "self_attn.k_proj.bias", "self_attn.v_proj.bias"],
12641256
"out_proj_weight": "self_attn.out_proj.weight",
12651257
"out_proj_bias": "self_attn.out_proj.bias",
12661258
"linear1_weight": "fc1.weight",
@@ -1272,7 +1264,7 @@ def __init__(self, detr_layer, config):
12721264
"norm2_weight": "final_layer_norm.weight",
12731265
"norm2_bias": "final_layer_norm.bias",
12741266
}
1275-
1267+
12761268
self.validate_bettertransformer()
12771269

12781270
def forward(self, hidden_states, attention_mask, output_attentions: bool, *_, **__):
@@ -1303,15 +1295,15 @@ def forward(self, hidden_states, attention_mask, output_attentions: bool, *_, **
13031295
self.linear2_bias,
13041296
attention_mask,
13051297
)
1306-
1298+
13071299
if hidden_states.is_nested and self.is_last_layer:
13081300
hidden_states = hidden_states.to_padded_tensor(0.0)
1309-
1301+
13101302
else:
13111303
raise NotImplementedError(
13121304
"Training and Autocast are not implemented for BetterTransformer + Detr. Please open an issue."
13131305
)
1314-
1306+
13151307
return (hidden_states,)
13161308

13171309

tests/bettertransformer/test_vision.py

+13-2
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,18 @@ class BetterTransformersVisionTest(BetterTransformersTestMixin, unittest.TestCas
2727
r"""
2828
Testing suite for Vision Models - tests all the tests defined in `BetterTransformersTestMixin`
2929
"""
30-
SUPPORTED_ARCH = ["blip-2", "clip", "clip_text_model", "deit", "detr", "vilt", "vit", "vit_mae", "vit_msn", "yolos"]
30+
SUPPORTED_ARCH = [
31+
"blip-2",
32+
"clip",
33+
"clip_text_model",
34+
"deit",
35+
"detr",
36+
"vilt",
37+
"vit",
38+
"vit_mae",
39+
"vit_msn",
40+
"yolos",
41+
]
3142

3243
def prepare_inputs_for_class(self, model_id, model_type, batch_size=3, **preprocessor_kwargs):
3344
if model_type == "vilt":
@@ -56,7 +67,7 @@ def prepare_inputs_for_class(self, model_id, model_type, batch_size=3, **preproc
5667

5768
if model_type == "blip-2":
5869
inputs["decoder_input_ids"] = inputs["input_ids"]
59-
70+
6071
elif model_type == "detr":
6172
# Assuming detr just needs an image
6273
url = "http://images.cocodataset.org/val2017/000000039769.jpg"

0 commit comments

Comments (0)