Commit 0cd7fd1

OpenVINO extensions: custom operations support (#481)

1 parent a7dbb25 · commit 0cd7fd1
30 files changed: +1995 −2 lines

.ci/azure/linux.yml (+19 −1)

```diff
@@ -49,6 +49,7 @@ jobs:
     BIN_DIR: $(OPENVINO_REPO_DIR)/bin/intel64/$(BUILD_TYPE)
     INSTALL_DIR: $(WORK_DIR)/install_pkg
     SETUPVARS: $(INSTALL_DIR)/setupvars.sh
+    CUSTOM_OP_LIB: $(BIN_DIR)/libuser_ov_extensions.so
     GRADLE_VER: 7.1.1

   steps:
@@ -93,7 +94,7 @@ jobs:
       set -e
       sudo -E $(OPENVINO_REPO_DIR)/install_build_dependencies.sh
       # Move jdk into contrib install_build_dependencies.sh
-      sudo apt --assume-yes install default-jdk
+      sudo apt --assume-yes install default-jdk libopencv-dev
       # Install gradle for Java
       sudo apt -y --no-install-recommends install unzip
       wget https://services.gradle.org/distributions/gradle-$(GRADLE_VER)-bin.zip
@@ -155,3 +156,20 @@ jobs:
       done
     workingDirectory: $(REPO_DIR)/modules/java_api
     displayName: 'Java tests'
+
+  - script: |
+      python3 -m pip install --user virtualenv
+      python3 -m virtualenv -p /usr/bin/python3.8 .env3
+      source .env3/bin/activate
+      python -m pip install --upgrade pip
+      python -m pip install -r $(REPO_DIR)/modules/custom_operations/tests/requirements.txt
+      cd ${OPENVINO_REPO_DIR}/tools && python -m pip install mo/
+    workingDirectory: $(WORK_DIR)
+    displayName: 'Create custom ops env'
+
+  - script: |
+      . $(SETUPVARS)
+      source $(WORK_DIR)/.env3/bin/activate
+      python -m pytest tests/run_tests.py
+    workingDirectory: $(REPO_DIR)/modules/custom_operations
+    displayName: 'Custom ops tests'
```

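The `CUSTOM_OP_LIB` variable points the test run at the freshly built extension binary. A minimal sketch of how a test could pick it up and register the extension (the variable name comes from this pipeline; the fallback path is an assumption for local runs, not taken from `run_tests.py`):

```python
import os
from openvino.runtime import Core

# CUSTOM_OP_LIB is exported by the CI jobs above; the fallback is a
# hypothetical local build location.
ext_lib = os.environ.get('CUSTOM_OP_LIB',
                         'user_ie_extensions/build/libuser_ov_extensions.so')

core = Core()
core.add_extension(ext_lib)  # must happen before read_model() on a model with custom ops
```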
.ci/azure/windows.yml (+5 −1)

```diff
@@ -45,11 +45,12 @@ jobs:
     MODELS_PATH: $(REPO_DIR)\..\testdata
     WORK_DIR: $(Pipeline.Workspace)\_w
     BUILD_DIR: D:\build
-    BIN_DIR: $(OPENVINO_REPO_DIR)\bin\intel64
+    BIN_DIR: $(OPENVINO_REPO_DIR)\bin\intel64\$(BUILD_TYPE)
     MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
     MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
     INSTALL_DIR: $(WORK_DIR)\install_pkg
     SETUPVARS: $(INSTALL_DIR)\setupvars.bat
+    CUSTOM_OP_LIB: $(BIN_DIR)\user_ov_extensions.dll
     GRADLE_VER: 7.1.1

   steps:
@@ -96,6 +97,8 @@ jobs:
       powershell -command "Expand-Archive -Force ninja-win.zip"
       powershell -command "Invoke-WebRequest https://services.gradle.org/distributions/gradle-$(GRADLE_VER)-bin.zip -OutFile gradle-$(GRADLE_VER)-bin.zip"
       powershell -command "Expand-Archive -Force gradle-$(GRADLE_VER)-bin.zip"
+      python -m pip install --upgrade pip
+      python -m pip install -r $(OPENVINO_REPO_DIR)\src\bindings\python\src\compatibility\openvino\requirements-dev.txt
     workingDirectory: $(WORK_DIR)
     displayName: 'Install dependencies'
@@ -107,6 +110,7 @@ jobs:
       -DENABLE_INTEL_MYRIAD_COMMON=OFF ^
       -DENABLE_CPPLINT=OFF ^
      -DOPENVINO_EXTRA_MODULES=$(REPO_DIR)/modules ^
+      -DENABLE_PYTHON=ON ^
       -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" ^
       -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" ^
       $(OPENVINO_REPO_DIR)
```

README.md (+2 −0)

```diff
@@ -12,6 +12,7 @@ This list gives an overview of all modules available inside the contrib repository:
 * [**nvidia_plugin**](./modules/nvidia_plugin): NVIDIA GPU Plugin -- allows to perform deep neural networks inference on NVIDIA GPUs using CUDA, using OpenVINO API.
 * [**java_api**](./modules/java_api): Inference Engine Java API -- provides Java wrappers for Inference Engine public API.
 * [**Azure Video Analyzer**](./modules/ovms_ai_extension/): Azure Video Analyzer Extension -- enables exchange of video frames and inference results between [Azure Video Analyzer (AVA)](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/overview) and OpenVINO™ Model Server.
+* [**custom_operations**](./modules/custom_operations/): Collection of Custom Operations -- implement Custom Operations with OpenVINO Extensibility Mechanism.

 ## How to build OpenVINO with extra modules
 You can build OpenVINO, so it will include the modules from this repository. Contrib modules are under constant development and it is recommended to use them alongside the master branch or latest releases of OpenVINO.
@@ -35,6 +36,7 @@ Additional build instructions are available for the following modules:
 * [**arm_plugin**](./modules/arm_plugin/README.md)
 * [**nvidia_plugin**](./modules/nvidia_plugin/README.md)
 * [**ovms_ai_extension**](./modules/ovms_ai_extension/README.md)
+* [**custom_operations**](./modules/custom_operations/README.md)

 ## Update the repository documentation
 In order to keep a clean overview containing all contributed modules, the following files need to be created/adapted:
```
modules/custom_operations/CMakeLists.txt (new, +8)

```cmake
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required(VERSION 3.13)
project(openvino_extensions)

add_subdirectory(user_ie_extensions)
```

modules/custom_operations/README.md (new, +66)

# Collection of Custom Operations using OpenVINO Extensibility Mechanism

This module provides a guide and the implementation of a few custom operations for the Intel OpenVINO runtime using its [Extensibility Mechanism](https://docs.openvino.ai/latest/openvino_docs_Extensibility_UG_Intro.html).

There are several use cases where OpenVINO custom operations are applicable:

* There is an ONNX model which contains an operation not supported by OpenVINO.
* You have a PyTorch model, which could be converted to ONNX, with an operation not supported by OpenVINO.
* You want to replace a subgraph of an ONNX model with a single custom operation supported by OpenVINO.

More specifically, here we implement custom OpenVINO operations that add support for the following native PyTorch operation:

* [torch.fft](examples/fft)

And other custom operations introduced by third-party frameworks:

* [calculate_grid](/examples/calculate_grid) and [sparse_conv](/examples/sparse_conv) from [Open3D](https://github.com/isl-org/Open3D)
* [complex_mul](/examples/complex_mul) from [DIRECT](https://github.com/NKI-AI/direct)

You can find more information about how to create and use OpenVINO Extensions to facilitate the mapping of custom operations from a framework model representation to the OpenVINO representation [here](https://docs.openvino.ai/latest/openvino_docs_Extensibility_UG_Frontend_Extensions.html).


## Build custom OpenVINO operation extension library

The C++ code implementing the custom operations is in the `user_ie_extensions` directory. You'll have to build an "extension library" from this code so that it can be loaded at runtime. The steps below describe the build process:

1. Install [OpenVINO Runtime for C++](https://docs.openvino.ai/latest/openvino_docs_install_guides_install_dev_tools.html#for-c-developers).

2. Build the library:

```bash
cd user_ie_extensions
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release && cmake --build . --parallel 4
```

If you need to build only some of the operations, specify them with the `-DCUSTOM_OPERATIONS` option:
```bash
cmake .. -DCMAKE_BUILD_TYPE=Release -DCUSTOM_OPERATIONS=complex_mul\;fft
```

- Please note that an [OpenCV](https://opencv.org/) installation is required to build the extension for the [fft](examples/fft) operation. The other extensions can still be built without OpenCV.

You can also build the extension library [while building OpenVINO](../../README.md).

## Load and use custom OpenVINO operation extension library

You can use the custom OpenVINO operations implementation by loading it into the OpenVINO `Core` object at runtime. Then, load the model from the ONNX file with the `read_model()` API. Here's how to do that in Python:

```python
from openvino.runtime import Core

# Create Core and register user extension
core = Core()
core.add_extension('/path/to/libuser_ov_extensions.so')

# Load model from .onnx file directly
model = core.read_model('model.onnx')
compiled_model = core.compile_model(model, 'CPU')
```

You can also convert the model to OpenVINO IR with Model Optimizer, using the extra `--extension` flag to specify the path to the custom extensions:

```bash
mo --input_model model.onnx --extension /path/to/libuser_ov_extensions.so
```
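The IR produced this way still contains the custom nodes, so the extension library has to be registered before reading the converted model as well. A short sketch (the `model.xml` name follows the Model Optimizer default):

```python
from openvino.runtime import Core

core = Core()
core.add_extension('/path/to/libuser_ov_extensions.so')

# Register the extension first, then read the converted IR
model = core.read_model('model.xml')
compiled_model = core.compile_model(model, 'CPU')
```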
modules/custom_operations/examples/calculate_grid/calculate_grid.py (new, +27)

```python
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import torch


class CalculateGrid(torch.autograd.Function):
    @staticmethod
    def symbolic(g, in_positions):
        # Export this function as a single custom "CalculateGrid" ONNX node
        return g.op("CalculateGrid", in_positions)

    @staticmethod
    def forward(self, in_positions):
        # Offsets to the 8 neighboring grid cells in the negative directions
        filter = torch.Tensor([[-1, -1, -1], [-1, -1, 0], [-1, 0, -1], [-1, 0, 0],
                               [0, -1, -1], [0, -1, 0], [0, 0, -1],
                               [0, 0, 0]]).to(in_positions.device)

        # Repeat every input position once per offset, then apply the offsets
        out_pos = in_positions.long().repeat(1, filter.shape[0]).reshape(-1, 3)
        filter = filter.repeat(in_positions.shape[0], 1)

        out_pos = out_pos + filter
        out_pos = out_pos[out_pos.min(1).values >= 0]  # drop positions with negative coordinates
        out_pos = out_pos[(~((out_pos.long() % 2).bool()).any(1))]  # keep all-even coordinates
        out_pos = torch.unique(out_pos, dim=0)  # deduplicate

        return out_pos + 0.5
```
modules/custom_operations/examples/calculate_grid/export_model.py (new, +51)

```python
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse

import numpy as np
import torch
import torch.nn as nn

from .calculate_grid import CalculateGrid


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.calculate_grid = CalculateGrid()

    def forward(self, x):
        return self.calculate_grid.apply(x)


def export(num_points, max_grid_extent):
    # Generate a list of unique integer positions and add a mantissa
    np.random.seed(32)
    torch.manual_seed(11)

    inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3])
    inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32)  # mantissa in [0, 1)

    model = MyModel()
    with torch.no_grad():
        torch.onnx.export(model, (inp_pos), 'model.onnx',
                          input_names=['input'],
                          output_names=['output'],
                          operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)

    ref = model(inp_pos).detach().numpy()

    # Pad the reference with a sentinel row (-1, 0, 0) and zeros so its
    # shape matches the number of input points
    ref = np.concatenate((ref, [[-1, 0, 0]]))
    ref = np.pad(ref, ((0, inp_pos.shape[0] - ref.shape[0]), (0, 0)))

    return [inp_pos.detach().numpy()], ref


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate ONNX model and test data')
    parser.add_argument('--num_points', type=int, default=10)
    parser.add_argument('--max_grid_extent', type=int, default=5)
    args = parser.parse_args()

    export(args.num_points, args.max_grid_extent)
```
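Because of the relative import, the script is meant to be run as a module from the `custom_operations` directory, e.g. `python -m examples.calculate_grid.export_model`. A hypothetical driver that checks the exported model against the PyTorch reference (the extension path and the comparison step are assumptions for illustration, not part of this commit):

```python
from openvino.runtime import Core

from examples.calculate_grid.export_model import export

inputs, ref = export(num_points=10, max_grid_extent=5)  # writes model.onnx

core = Core()
core.add_extension('/path/to/libuser_ov_extensions.so')
compiled = core.compile_model(core.read_model('model.onnx'), 'CPU')
result = compiled.infer_new_request({'input': inputs[0]})
# Compare `result` with `ref`, keeping the sentinel padding in mind.
```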
modules/custom_operations/examples/complex_mul/complex_mul.py (new, +25)

```python
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import torch


class ComplexMul(torch.autograd.Function):
    @staticmethod
    def symbolic(g, input_tensor, other_tensor, is_conj=True):
        # Export as a custom "ComplexMultiplication" ONNX node with an is_conj attribute
        return g.op("ComplexMultiplication", input_tensor, other_tensor, is_conj_i=int(is_conj))

    @staticmethod
    def forward(self, input_tensor, other_tensor):
        # The last dimension holds the (real, imaginary) parts
        complex_index = -1
        real_part = input_tensor[..., 0] * other_tensor[..., 0] - input_tensor[..., 1] * other_tensor[..., 1]
        imaginary_part = input_tensor[..., 0] * other_tensor[..., 1] + input_tensor[..., 1] * other_tensor[..., 0]

        multiplication = torch.cat(
            [
                real_part.unsqueeze(dim=complex_index),
                imaginary_part.unsqueeze(dim=complex_index),
            ],
            dim=complex_index,
        )
        return multiplication
```
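The forward pass implements plain complex multiplication on tensors whose last dimension packs the real and imaginary parts: (a + bi)(c + di) = (ac − bd) + (ad + bc)i. A quick standalone sanity check:

```python
import torch

a = torch.tensor([[2., 3.]])   # 2 + 3i
b = torch.tensor([[4., -1.]])  # 4 - 1i

real = a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1]  # 2*4 - 3*(-1) = 11
imag = a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0]  # 2*(-1) + 3*4 = 10
print(real.item(), imag.item())  # 11.0 10.0 -> (2+3i)(4-1i) = 11 + 10i
```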
modules/custom_operations/examples/complex_mul/export_model.py (new, +44)

```python
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
from .complex_mul import ComplexMul


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.complex_mul = ComplexMul()

    def forward(self, x, y):
        return self.complex_mul.apply(x, y)


def export(inp_shape=[3, 2, 4, 8, 2], other_shape=[3, 2, 4, 8, 2]):
    np.random.seed(324)
    torch.manual_seed(32)

    model = MyModel()
    inp = Variable(torch.randn(inp_shape))
    inp1 = Variable(torch.randn(other_shape))
    model.eval()

    with torch.no_grad():
        torch.onnx.export(model, (inp, inp1), 'model.onnx',
                          input_names=['input', 'input1'],
                          output_names=['output'],
                          operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)

    ref = model(inp, inp1)
    return [inp.detach().numpy(), inp1.detach().numpy()], ref.detach().numpy()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate ONNX model and test data')
    parser.add_argument('--inp_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2])
    parser.add_argument('--other_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2])
    args = parser.parse_args()

    export(args.inp_shape, args.other_shape)
```
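A quick way to confirm that the custom node survived the export is to inspect the ONNX graph (an illustrative check; it assumes the `onnx` package is installed):

```python
import onnx

model = onnx.load('model.onnx')
print([node.op_type for node in model.graph.node])
# A 'ComplexMultiplication' node is expected in the list.
```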
modules/custom_operations/examples/fft/export_model.py (new, +49)

```python
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
from .fft import FFT


class MyModel(nn.Module):
    def __init__(self, inverse, centered, dims):
        super(MyModel, self).__init__()
        self.inverse = inverse
        self.centered = centered
        self.dims = dims
        self.fft = FFT()

    def forward(self, x):
        return self.fft.apply(x, self.inverse, self.centered, self.dims)


def export(shape, inverse, centered, dims):
    np.random.seed(324)
    torch.manual_seed(32)

    model = MyModel(inverse, centered, dims)
    inp = Variable(torch.randn(shape))
    model.eval()

    with torch.no_grad():
        torch.onnx.export(model, inp, 'model.onnx',
                          input_names=['input'],
                          output_names=['output'],
                          operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)

    ref = model(inp)
    return [inp.detach().numpy()], ref.detach().numpy()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate ONNX model and test data')
    parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8, 2])
    parser.add_argument('--inverse', action='store_true')
    parser.add_argument('--centered', action='store_true')
    parser.add_argument('--dims', type=int, nargs='+', default=[2, 3])
    args = parser.parse_args()
    export(args.shape, args.inverse, args.centered, args.dims)
```
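The `fft.py` module imported above is not part of this excerpt. Based on the CalculateGrid and ComplexMul wrappers, a sketch of what such a wrapper could look like (the class body, the attribute names, and the use of `torch.fft` here are assumptions for illustration, not the committed implementation):

```python
import torch


class FFT(torch.autograd.Function):
    @staticmethod
    def symbolic(g, x, inverse, centered, dims):
        # Hypothetical custom node; attribute names are illustrative only
        return g.op("FFT", x,
                    inverse_i=int(inverse), centered_i=int(centered), dims_i=dims)

    @staticmethod
    def forward(self, x, inverse, centered, dims):
        # The last dimension packs (real, imaginary), as in the export script above
        xc = torch.view_as_complex(x.contiguous())
        if centered:
            xc = torch.fft.ifftshift(xc, dim=dims)
        yc = torch.fft.ifftn(xc, dim=dims) if inverse else torch.fft.fftn(xc, dim=dims)
        if centered:
            yc = torch.fft.fftshift(yc, dim=dims)
        return torch.view_as_real(yc)
```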
