# sample_config.yml
models:
  - name: SampLeNet_example
    # list of launchers for your topology.
    launchers:
      # launcher framework (e.g. caffe, openvino)
      - framework: openvino
        # device for inference (e.g. for openvino: CPU, GPU, HETERO:CPU,GPU, ...)
        device: CPU
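        # (optional illustration, not part of the original sample) other devices listed in the comment
        # above use the same syntax, e.g.:
        # device: GPU
        # device: HETERO:CPU,GPU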
        # model topology file (.xml, .onnx, etc.)
        # the path to the topology is prefixed with the directory specified in the "-m/--models" option.
        # if you use the openvino (dlsdk) launcher, you can also specify the model in its source framework
        # format; in this case Model Optimizer will be used to obtain the converted model.
        model: SampLeNet.xml
        # topology weights binary (*.bin) - optional, can be found automatically based on the model path if required
        weights: SampLeNet.bin
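        # (optional illustration) as noted above, a model in a source framework format, e.g. a hypothetical
        # ONNX file, could be passed directly instead of the IR .xml/.bin pair:
        # model: SampLeNet.onnx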
        # the launcher returns a raw result, so it should be converted
        # to an appropriate representation with an adapter
        adapter: classification
        # batch size
        batch: 1
    # metrics, preprocessing and postprocessing are typically dataset specific, so the dataset field
    # specifies the data and all other steps required to validate the topology.
    # there is typically a definitions file, which contains options for common datasets and which is merged in
    # during evaluation, but since "sample_dataset" is not used anywhere else, this config contains the full definition
    datasets:
      # a uniquely distinguishable name for the dataset.
      # note that all other steps are specific to this dataset only:
      # if you need to test a topology on multiple datasets, you need to specify
      # every step explicitly for each dataset
      - name: sample_dataset
        # directory where input images are searched for,
        # prefixed with the directory specified in the "-s/--source" option
        data_source: sample_dataset/test
        # parameters for annotation conversion to a common annotation representation format.
        annotation_conversion:
          # specifies which annotation converter will be used.
          # if your format is not covered by the existing converters, you can provide your own,
          # i.e. implement the BaseFormatConverter interface.
          # all annotation converters are stored in the accuracy_checker/annotation_converters directory.
          converter: cifar
          # converter-specific parameters.
          # the full range of available options can be found in accuracy_checker/annotation_converters/README.md.
          # relative paths will be merged with the "-s/--source" option
          data_batch_file: cifar-10-batches-py/test_batch
          # CIFAR stores images as a binary file, so they should be converted to PNG during the first evaluation.
          # you do not need to use these options if you have already converted the dataset images.
          convert_images: True
          # path where converted images are saved.
          converted_images_dir: sample_dataset/test
          # number of classes in the dataset, used for label_map generation
          num_classes: 10
        # list of preprocessing steps applied to each image during validation;
        # order of entries matters
        preprocessing:
          # resize the input image to the topology input size.
          # you may specify the size to which the image should be resized
          # via the dst_width and dst_height fields (see the commented example below)
          - type: resize
            size: 32
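          # (optional illustration, not part of the original sample) an equivalent resize
          # spelled out with the explicit target dimensions mentioned above:
          # - type: resize
          #   dst_width: 32
          #   dst_height: 32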
          # the topology is trained on RGB images, but OpenCV reads images in BGR,
          # hence they must be converted to RGB
          - type: bgr_to_rgb
          # dataset mean and standard deviation
          - type: normalization
            # you may specify the statistics manually, as here, or reference precomputed values
            # for common datasets such as ImageNet
            mean: (125.307, 122.961, 113.8575)
            std: (51.5865, 50.847, 51.255)
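            # (optional illustration, assuming your accuracy_checker version ships these presets)
            # precomputed statistics can be referenced by name instead of listing the values:
            # mean: imagenet
            # std: imagenet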
        # list of metrics calculated on the dataset
        metrics:
          - type: accuracy
            top_k: 1
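
# A possible way to run the accuracy checker with this config (paths are placeholders,
# adjust them to your layout):
#   accuracy_check -c sample_config.yml -m <models_dir> -s <source_dir>
# where "-m/--models" and "-s/--source" are the prefix directories referenced in the comments above.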