forked from intel/neural-compressor
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun_benchmark.sh
executable file
·160 lines (145 loc) · 2.85 KB
/
run_benchmark.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
#!/bin/bash
# Trace every executed command to stderr — useful in CI logs for seeing the
# exact tf_benchmark.py invocation that was built.
set -x
main() {
  # Orchestrate one benchmark run: parse the CLI flags, translate the
  # requested mode into driver flags, then launch the benchmark itself.
  init_params "$@"
  define_mode
  run_benchmark
}
# Parse command-line flags of the form --key=value into global variables.
# Globals written: topology, dataset_location, input_model, mode,
#                  batch_size, iters (default 100).
# Exits 1 on any unrecognized flag.
function init_params {
  iters=100
  for var in "$@"
  do
    case $var in
      --topology=*)
        # ${var#*=} strips everything up to the first '=' — unlike the old
        # `echo | cut -f2 -d=`, it keeps values that themselves contain '='
        # and spawns no subprocesses.
        topology=${var#*=}
        ;;
      --dataset_location=*)
        dataset_location=${var#*=}
        ;;
      --input_model=*)
        input_model=${var#*=}
        ;;
      --mode=*)
        mode=${var#*=}
        ;;
      --batch_size=*)
        batch_size=${var#*=}
        ;;
      --iters=*)
        iters=${var#*=}
        ;;
      *)
        # Diagnostics go to stderr so they don't pollute captured stdout.
        echo "Error: No such parameter: ${var}" >&2
        exit 1
        ;;
    esac
  done
}
# Translate the requested ${mode} into the flag string appended to the
# tf_benchmark.py command line (global: mode_cmd).
# "accuracy" is rejected — TF OOB models only support benchmark mode —
# and any unknown mode aborts the script.
function define_mode {
  case ${mode} in
    accuracy)
      echo "For TF OOB models, there is only benchmark mode!, num iter is: ${iters}"
      exit 1
      ;;
    benchmark)
      mode_cmd=" --benchmark "
      ;;
    *)
      echo "Error: No such mode: ${mode}"
      exit 1
      ;;
  esac
}
# Topologies that require an explicit --model_name flag on tf_benchmark.py
# (see run_benchmark below).
# NOTE: the "--------" entries are sentinels, not model names. Membership is
# tested by joining the array with spaces and searching for " ${topology} ";
# the sentinels guarantee the first and last real entries are also bounded
# by spaces on both sides.
models_need_name=(
--------
CRNN
CapsuleNet
CenterNet
CharCNN
Hierarchical_LSTM
MANN
MiniGo
TextCNN
TextRNN
aipg-vdcnn
arttrack-coco-multi
arttrack-mpii-single
context_rcnn_resnet101_snapshot_serenget
deepspeech
deepvariant_wgs
dense_vnet_abdominal_ct
east_resnet_v1_50
efficientnet-b0
efficientnet-b0_auto_aug
efficientnet-b5
efficientnet-b7_auto_aug
facenet-20180408-102900
handwritten-score-recognition-0003
license-plate-recognition-barrier-0007
optical_character_recognition-text_recognition-tf
pose-ae-multiperson
pose-ae-refinement
resnet_v2_200
show_and_tell
text-recognition-0012
vggvox
wide_deep
yolo-v3-tiny
NeuMF
PRNet
DIEN_Deep-Interest-Evolution-Network
--------
)
# Topologies that must skip TensorFlow's optimize_for_inference pass
# (--disable_optimize is added in run_benchmark).
# "--------" entries are space-padding sentinels, as in models_need_name.
models_need_disable_optimize=(
--------
CRNN
efficientnet-b0
efficientnet-b0_auto_aug
efficientnet-b5
efficientnet-b7_auto_aug
vggvox
--------
)
# Topologies that need the model loaded via the neural_compressor graph_def
# path (--use_nc is added in run_benchmark).
# "--------" entries are space-padding sentinels, as in models_need_name.
models_need_nc_graphdef=(
--------
pose-ae-multiperson
pose-ae-refinement
centernet_hg104
DETR
Elmo
Time_series_LSTM
Unet
WD
ResNest101
ResNest50
ResNest50-3D
adversarial_text
Attention_OCR
AttRec
GPT2
Parallel_WaveNet
PNASNet-5
VAE-CF
DLRM
Deep_Speech_2
--------
)
# run benchmark: assemble per-model extra flags, then invoke tf_benchmark.py.
# Globals read: iters, topology, input_model, mode_cmd,
#               models_need_name, models_need_disable_optimize,
#               models_need_nc_graphdef.
function run_benchmark {
  # Base flags: iteration count (from --iters, default 100) plus warmup runs.
  local -a extra_args=(--num_iter "${iters}" --num_warmup 10)

  # Space-bounded glob membership test over the joined array. Using == with
  # a glob instead of the old =~ regex avoids misfires when ${topology}
  # contains regex metacharacters (e.g. the many names with '-' and '.').
  if [[ " ${models_need_name[*]} " == *" ${topology} "* ]]; then
    echo "$topology need model name!"
    extra_args+=(--model_name "${topology}")
  fi
  if [[ " ${models_need_disable_optimize[*]} " == *" ${topology} "* ]]; then
    echo "$topology need to disable optimize_for_inference!"
    extra_args+=(--disable_optimize)
  fi
  if [[ " ${models_need_nc_graphdef[*]} " == *" ${topology} "* ]]; then
    echo "$topology need neural_compressor graph_def!"
    extra_args+=(--use_nc)
  fi

  # ${mode_cmd} stays unquoted on purpose: it holds zero or more
  # whitespace-separated flags (e.g. " --benchmark ") set by define_mode.
  python tf_benchmark.py \
    --model_path "${input_model}" \
    "${extra_args[@]}" \
    ${mode_cmd}
}
# Entry point: forward all CLI arguments to main.
main "$@"