@@ -59,7 +59,7 @@ def test_run_glue(self):
with tempfile.TemporaryDirectory() as tmp_dir:
test_args = f"""
run_glue.py
- --model_name_or_path distilbert-base-uncased-finetuned-sst-2-english
+ --model_name_or_path hf-internal-testing/tiny-random-DistilBertForSequenceClassification
--task_name sst2
--apply_quantization
--apply_pruning
@@ -79,13 +79,12 @@ def test_run_glue(self):
with patch.object(sys, "argv", test_args):
run_glue.main()
results = get_results(tmp_dir)
- self.assertGreaterEqual(results["eval_accuracy"], 0.70)

def test_run_qa(self):
with tempfile.TemporaryDirectory() as tmp_dir:
test_args = f"""
run_qa.py
- --model_name_or_path distilbert-base-uncased-distilled-squad
+ --model_name_or_path hf-internal-testing/tiny-random-DistilBertForQuestionAnswering
--dataset_name squad
--apply_quantization
--apply_pruning
@@ -105,14 +104,12 @@ def test_run_qa(self):
with patch.object(sys, "argv", test_args):
run_qa.main()
results = get_results(tmp_dir)
- self.assertGreaterEqual(results["eval_f1"], 70)
- self.assertGreaterEqual(results["eval_exact_match"], 70)

def test_run_ner(self):
with tempfile.TemporaryDirectory() as tmp_dir:
test_args = f"""
run_ner.py
- --model_name_or_path elastic/distilbert-base-uncased-finetuned-conll03-english
+ --model_name_or_path hf-internal-testing/tiny-random-RobertaForTokenClassification
--dataset_name conll2003
--apply_quantization
--apply_pruning
@@ -132,16 +129,12 @@ def test_run_ner(self):
with patch.object(sys, "argv", test_args):
run_ner.main()
results = get_results(tmp_dir)
- self.assertGreaterEqual(results["eval_accuracy"], 0.70)
- self.assertGreaterEqual(results["eval_f1"], 0.70)
- self.assertGreaterEqual(results["eval_precision"], 0.70)
- self.assertGreaterEqual(results["eval_recall"], 0.70)

def test_run_swag(self):
with tempfile.TemporaryDirectory() as tmp_dir:
test_args = f"""
run_swag.py
- --model_name_or_path ehdwns1516/bert-base-uncased_SWAG
+ --model_name_or_path hf-internal-testing/tiny-random-AlbertForMultipleChoice
--apply_quantization
--apply_pruning
--target_sparsity 0.02
@@ -160,15 +153,14 @@ def test_run_swag(self):
with patch.object(sys, "argv", test_args):
run_swag.main()
results = get_results(tmp_dir)
- self.assertGreaterEqual(results["eval_accuracy"], 0.60)

def test_run_clm(self):
quantization_approach = "dynamic"

with tempfile.TemporaryDirectory() as tmp_dir:
test_args = f"""
run_clm.py
- --model_name_or_path EleutherAI/gpt-neo-125M
+ --model_name_or_path hf-internal-testing/tiny-random-GPT2LMHeadModel
--dataset_name wikitext
--dataset_config_name wikitext-2-raw-v1
--apply_quantization
@@ -190,15 +182,14 @@ def test_run_clm(self):
with patch.object(sys, "argv", test_args):
run_clm.main()
results = get_results(tmp_dir)
- self.assertLessEqual(results["eval_loss"], 15)

def test_run_mlm(self):
quantization_approach = "static"

with tempfile.TemporaryDirectory() as tmp_dir:
test_args = f"""
run_mlm.py
- --model_name_or_path google/electra-small-discriminator
+ --model_name_or_path hf-internal-testing/tiny-random-DistilBertForMaskedLM
--dataset_name wikitext
--dataset_config_name wikitext-2-raw-v1
--apply_quantization
@@ -220,7 +211,6 @@ def test_run_mlm(self):
with patch.object(sys, "argv", test_args):
run_mlm.main()
results = get_results(tmp_dir)
- self.assertLessEqual(results["eval_loss"], 15)


if __name__ == "__main__":
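For context, every test in this diff follows the same pattern: it swaps a full-size checkpoint for a `hf-internal-testing/tiny-random-*` model, drops the metric-threshold assertions, builds a CLI argument string, patches `sys.argv`, and calls the example script's `main()` directly before reading the metrics back with `get_results(tmp_dir)`. Below is a minimal, self-contained sketch of that pattern under stated assumptions; the `get_results` body, the `--do_train`/`--do_eval`/`--output_dir` flags, and the `all_results.json` location are illustrative and not part of this diff.

```python
# Sketch of the argv-patching test pattern used above (assumptions noted inline).
import json
import os
import sys
import tempfile
from unittest.mock import patch

import run_glue  # assumption: the example script is importable and exposes main()


def get_results(output_dir):
    # Hypothetical helper: assumes the script dumps its metrics to all_results.json.
    with open(os.path.join(output_dir, "all_results.json")) as f:
        return json.load(f)


with tempfile.TemporaryDirectory() as tmp_dir:
    # .split() turns the multi-line string into an argv-style list,
    # with the script name first and one flag/value per element.
    test_args = f"""
        run_glue.py
        --model_name_or_path hf-internal-testing/tiny-random-DistilBertForSequenceClassification
        --task_name sst2
        --do_train
        --do_eval
        --output_dir {tmp_dir}
        """.split()

    # Patch argv so the script's own argument parser sees these flags.
    with patch.object(sys, "argv", test_args):
        run_glue.main()
        results = get_results(tmp_dir)
        print(results.get("eval_accuracy"))
```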