models:
  - name: SampLeNet_example

    # list of launchers for your topology.
    launchers:
      # launcher framework (e.g. caffe, dlsdk)
      - framework: dlsdk
        # device for inference (e.g. for dlsdk: cpu, gpu, hetero:cpu,gpu ...)
        # Note: not all devices support blob execution, which is why this sample uses MYRIAD.
        device: MYRIAD
        # exported executable network blob
        # you can export an executable network blob from OpenVINO IR using compile_tool, see http://docs.openvinotoolkit.org/latest/_inference_engine_tools_compile_tool_README.html
        # the path to the model is prefixed with the directory specified in the "-m/--models" option
        model: SampleNet.blob
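        # For illustration only: a compile_tool invocation producing such a blob might look like the line
        # below (file names are assumptions and exact flags may differ between OpenVINO versions):
        #   compile_tool -m SampleNet.xml -d MYRIAD -o SampleNet.blob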
        # the launcher returns a raw result, so it should be converted
        # to an appropriate representation with an adapter
        adapter: classification

    # metrics, preprocessing and postprocessing are typically dataset specific, so the dataset field
    # specifies the data and all other steps required to validate the topology
    # there is typically a definitions file, which contains options for common datasets and which is merged in
    # during evaluation, but since "sample_dataset" is not used anywhere else, this config contains the full definition
    datasets:
      # uniquely distinguishable name for the dataset
      # note that all other steps are specific to this dataset only
      # if you need to test the topology on multiple datasets, you need to specify
      # every step explicitly for each dataset
      - name: sample_dataset
        # directory where input images are searched for.
        # prefixed with the directory specified in the "-s/--source" option
        data_source: sample_dataset/test
        # parameters for annotation conversion to a common annotation representation format.
        annotation_conversion:
          # specifies which annotation converter will be used
          # to handle an annotation format that is not supported out of the box, provide your own annotation converter,
          # i.e. implement the BaseFormatConverter interface.
          # All annotation converters are stored in the accuracy_checker/annotation_converters directory.
          converter: cifar
          # converter-specific parameters.
          # The full range of available options can be found in accuracy_checker/annotation_converters/README.md
          # relative paths will be merged with the "-s/--source" option
          data_batch_file: cifar-10-batches-py/test_batch
          # CIFAR stores images in a binary file, so they should be converted to PNG during the first evaluation.
          # You do not need to use these options if you have already converted the dataset images.
          convert_images: True
          # path to save converted images.
          converted_images_dir: sample_dataset/test
          # number of classes in the dataset, used for label_map generation
          num_classes: 10
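          # Optionally, once the annotation has been converted and stored (see the "-a/--annotations" option
          # of accuracy_check), later runs can reference it directly at the dataset level instead of
          # repeating conversion, for example with illustrative file names:
          #   annotation: cifar10.pickle
          #   dataset_meta: cifar10.json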

        # list of preprocessing steps applied to each image during validation
        # order of entries matters
        preprocessing:
          # resize the input image to the topology input size
          # you may specify the size to which the image should be resized
          # via the dst_width, dst_height fields
          - type: resize
            size: 32
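            # equivalently, the target size can be given per dimension
            # (these two fields together have the same effect as "size: 32" above):
            #   dst_width: 32
            #   dst_height: 32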
          # the topology is trained on RGB images, but OpenCV reads images in BGR,
          # so they must be converted to RGB
          - type: bgr_to_rgb
          # dataset mean and standard deviation
          - type: normalization
            # you may specify the statistics manually or use precomputed values, such as the ImageNet ones, as well
            mean: (125.307, 122.961, 113.8575)
            std: (51.5865, 50.847, 51.255)
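            # in other words, each channel value is transformed as (pixel - mean) / std,
            # so the inputs fed to the network are roughly zero-centered with unit variance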

        # list of metrics calculated on the dataset
        metrics:
          - type: accuracy
            top_k: 1
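
# To run the evaluation with this configuration, an invocation along the following lines can be used
# (the config file name and directories are placeholders; -c/--config, -m/--models and -s/--source
# are the options referenced in the comments above):
#   accuracy_check -c <path_to_this_config>.yml -m <models_dir> -s <source_dir>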