Skip to content

Commit 542aa89

Browse files
Merge remote-tracking branch 'upstream/main' into longjie/add_parallel_cross_entropy
2 parents d98e9ba + 29f23f1 commit 542aa89

File tree

2 files changed

+12
-12
lines changed

2 files changed

+12
-12
lines changed

examples/onnxruntime/training/image-classification/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ torchrun --nproc_per_node=NUM_GPUS_YOU_HAVE run_image_classification.py \
3939
--per_device_eval_batch_size 32 \
4040
--logging_strategy steps \
4141
--logging_steps 10 \
42-
--evaluation_strategy epoch \
42+
--eval_strategy epoch \
4343
--seed 1337
4444
```
4545

optimum/onnxruntime/training_args.py

+11-11
Original file line numberDiff line numberDiff line change
@@ -117,32 +117,32 @@ def __post_init__(self):
117117
if self.disable_tqdm is None:
118118
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
119119

120-
if isinstance(self.evaluation_strategy, EvaluationStrategy):
120+
if isinstance(self.eval_strategy, EvaluationStrategy):
121121
warnings.warn(
122-
"using `EvaluationStrategy` for `evaluation_strategy` is deprecated and will be removed in version 5"
122+
"using `EvaluationStrategy` for `eval_strategy` is deprecated and will be removed in version 5"
123123
" of 🤗 Transformers. Use `IntervalStrategy` instead",
124124
FutureWarning,
125125
)
126126
# Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it.
127-
self.evaluation_strategy = self.evaluation_strategy.value
127+
self.eval_strategy = self.eval_strategy.value
128128

129-
self.evaluation_strategy = IntervalStrategy(self.evaluation_strategy)
129+
self.eval_strategy = IntervalStrategy(self.eval_strategy)
130130
self.logging_strategy = IntervalStrategy(self.logging_strategy)
131131
self.save_strategy = IntervalStrategy(self.save_strategy)
132132
self.hub_strategy = HubStrategy(self.hub_strategy)
133133

134134
self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type)
135-
if self.do_eval is False and self.evaluation_strategy != IntervalStrategy.NO:
135+
if self.do_eval is False and self.eval_strategy != IntervalStrategy.NO:
136136
self.do_eval = True
137137

138138
# eval_steps has to be defined and non-zero, fallbacks to logging_steps if the latter is non-zero
139-
if self.evaluation_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0):
139+
if self.eval_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0):
140140
if self.logging_steps > 0:
141141
logger.info(f"using `logging_steps` to initialize `eval_steps` to {self.logging_steps}")
142142
self.eval_steps = self.logging_steps
143143
else:
144144
raise ValueError(
145-
f"evaluation strategy {self.evaluation_strategy} requires either non-zero --eval_steps or"
145+
f"evaluation strategy {self.eval_strategy} requires either non-zero --eval_steps or"
146146
" --logging_steps"
147147
)
148148

@@ -154,7 +154,7 @@ def __post_init__(self):
154154
if self.logging_steps != int(self.logging_steps):
155155
raise ValueError(f"--logging_steps must be an integer if bigger than 1: {self.logging_steps}")
156156
self.logging_steps = int(self.logging_steps)
157-
if self.evaluation_strategy == IntervalStrategy.STEPS and self.eval_steps > 1:
157+
if self.eval_strategy == IntervalStrategy.STEPS and self.eval_steps > 1:
158158
if self.eval_steps != int(self.eval_steps):
159159
raise ValueError(f"--eval_steps must be an integer if bigger than 1: {self.eval_steps}")
160160
self.eval_steps = int(self.eval_steps)
@@ -165,13 +165,13 @@ def __post_init__(self):
165165

166166
# Sanity checks for load_best_model_at_end: we require save and eval strategies to be compatible.
167167
if self.load_best_model_at_end:
168-
if self.evaluation_strategy != self.save_strategy:
168+
if self.eval_strategy != self.save_strategy:
169169
raise ValueError(
170170
"--load_best_model_at_end requires the saving steps to be a multiple of the evaluation "
171171
"steps, which cannot get guaranteed when mixing ratio and absolute steps for save_steps "
172172
f"{self.save_steps} and eval_steps {self.eval_steps}."
173173
)
174-
if self.evaluation_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0:
174+
if self.eval_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0:
175175
if self.eval_steps < 1 or self.save_steps < 1:
176176
if not (self.eval_steps < 1 and self.save_steps < 1):
177177
raise ValueError(
@@ -244,7 +244,7 @@ def __post_init__(self):
244244
)
245245

246246
if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU:
247-
if self.evaluation_strategy == IntervalStrategy.NO:
247+
if self.eval_strategy == IntervalStrategy.NO:
248248
raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires an eval strategy")
249249
if not is_torch_available():
250250
raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires torch>=0.2.0")

0 commit comments

Comments (0)