
Commit aa92d13

Updates properties (openvinotoolkit#2403)
CVS-120640
1 parent 55da9d5 commit aa92d13

File tree

40 files changed: +176 -85 lines changed

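Across the 40 files the change is the same pattern: string-keyed OpenVINO configuration ("PERFORMANCE_HINT", "CACHE_DIR", "LOG_LEVEL", ...) is replaced by the typed property API from openvino.properties. A minimal before/after sketch of that pattern, not code from this commit ("model.xml" and the CPU device are placeholders):

import openvino as ov
import openvino.properties as props
import openvino.properties.hint as hints

core = ov.Core()
model = core.read_model("model.xml")  # placeholder model path

# Before: raw strings for both key and value
compiled = core.compile_model(model, "CPU", {"PERFORMANCE_HINT": "THROUGHPUT"})

# After: property objects for keys, enums for values (the style this commit adopts)
compiled = core.compile_model(model, "CPU", {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT})

# Queries take property objects directly
print(core.get_property("CPU", props.device.full_name))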

.ci/skipped_notebooks.yml

+5 -1

@@ -586,4 +586,8 @@
     - python:
         - '3.8'
     - os:
-        - macos-12
+        - macos-12
+- notebook: notebooks/llm-agent-react/llm-agent-react-langchain.ipynb
+  skips:
+    - python:
+        - '3.8'

.ci/spellcheck/.pyspelling.wordlist.txt

+1

@@ -617,6 +617,7 @@ perceptron
 Patil
 PEFT
 perceiver
+PerformanceMode
 performant
 PersonaGPT
 PGI

notebooks/auto-device/auto-device.ipynb

+11 -5

@@ -70,7 +70,7 @@
 "import platform\n",
 "\n",
 "# Install required packages\n",
-"%pip install -q \"openvino>=2023.1.0\" Pillow torch torchvision tqdm --extra-index-url https://download.pytorch.org/whl/cpu\n",
+"%pip install -q \"openvino>=2023.1.0\" \"numpy<2\" Pillow torch torchvision tqdm --extra-index-url https://download.pytorch.org/whl/cpu\n",
 "\n",
 "if platform.system() != \"Windows\":\n",
 "    %pip install -q \"matplotlib>=3.4\"\n",

@@ -187,8 +187,11 @@
 }
 ],
 "source": [
+"import openvino.properties.log as log\n",
+"\n",
+"\n",
 "# Set LOG_LEVEL to LOG_INFO.\n",
-"core.set_property(\"AUTO\", {\"LOG_LEVEL\": \"LOG_INFO\"})\n",
+"core.set_property(\"AUTO\", {log.level(): log.Level.INFO})\n",
 "\n",
 "# Load the model onto the target device.\n",
 "compiled_model = core.compile_model(ov_model)\n",

@@ -249,7 +252,7 @@
 ],
 "source": [
 "# Set LOG_LEVEL to LOG_NONE.\n",
-"core.set_property(\"AUTO\", {\"LOG_LEVEL\": \"LOG_NONE\"})\n",
+"core.set_property(\"AUTO\", {log.level(): log.Level.NO})\n",
 "\n",
 "compiled_model = core.compile_model(model=ov_model, device_name=\"AUTO\")\n",
 "\n",

@@ -611,12 +614,15 @@
 }
 ],
 "source": [
+"import openvino.properties.hint as hints\n",
+"\n",
+"\n",
 "THROUGHPUT_hint_context = InferContext(metrics_update_interval, metrics_update_num)\n",
 "\n",
 "print(\"Compiling Model for AUTO device with THROUGHPUT hint\")\n",
 "sys.stdout.flush()\n",
 "\n",
-"compiled_model = core.compile_model(model=ov_model, config={\"PERFORMANCE_HINT\": \"THROUGHPUT\"})\n",
+"compiled_model = core.compile_model(model=ov_model, config={hints.performance_mode(): hints.PerformanceMode.THROUGHPUT})\n",
 "\n",
 "infer_queue = ov.AsyncInferQueue(compiled_model, 0)  # Setting to 0 will query optimal number by default.\n",
 "infer_queue.set_callback(completion_callback)\n",

@@ -680,7 +686,7 @@
 "print(\"Compiling Model for AUTO Device with LATENCY hint\")\n",
 "sys.stdout.flush()\n",
 "\n",
-"compiled_model = core.compile_model(model=ov_model, config={\"PERFORMANCE_HINT\": \"LATENCY\"})\n",
+"compiled_model = core.compile_model(model=ov_model, config={hints.performance_mode(): hints.PerformanceMode.LATENCY})\n",
 "\n",
 "# Setting to 0 will query optimal number by default.\n",
 "infer_queue = ov.AsyncInferQueue(compiled_model, 0)\n",

notebooks/clip-language-saliency-map/clip-language-saliency-map.ipynb

+3 -2

@@ -90,7 +90,7 @@
 "source": [
 "# Install requirements\n",
 "%pip install -q \"openvino>=2023.1.0\"\n",
-"%pip install -q --extra-index-url https://download.pytorch.org/whl/cpu transformers \"torch>=2.1\" \"gradio>=4.19\""
+"%pip install -q --extra-index-url https://download.pytorch.org/whl/cpu transformers \"numpy<2\" \"torch>=2.1\" \"gradio>=4.19\""
 ]
 },
 {

@@ -759,14 +759,15 @@
 "outputs": [],
 "source": [
 "from typing import Dict, Any\n",
+"import openvino.properties.hint as hints\n",
 "\n",
 "\n",
 "image_model = core.read_model(image_model_path)\n",
 "\n",
 "image_model = core.compile_model(\n",
 "    model=image_model,\n",
 "    device_name=device.value,\n",
-"    config={\"PERFORMANCE_HINT\": \"THROUGHPUT\"},\n",
+"    config={hints.performance_mode(): hints.PerformanceMode.THROUGHPUT},\n",
 ")"
 ]
 },

notebooks/cross-lingual-books-alignment/cross-lingual-books-alignment.ipynb

+7 -2

@@ -1105,11 +1105,13 @@
 "source": [
 "from typing import Any\n",
 "\n",
+"import openvino.properties.hint as hints\n",
+"\n",
 "\n",
 "compiled_throughput_hint = core.compile_model(\n",
 "    ov_model,\n",
 "    device_name=device.value,\n",
-"    config={\"PERFORMANCE_HINT\": \"THROUGHPUT\"},\n",
+"    config={hints.performance_mode(): hints.PerformanceMode.THROUGHPUT},\n",
 ")"
 ]
 },

@@ -1323,7 +1325,10 @@
 }
 ],
 "source": [
-"cpu_name = core.get_property(\"CPU\", \"FULL_DEVICE_NAME\")\n",
+"import openvino.properties as props\n",
+"\n",
+"\n",
+"cpu_name = core.get_property(\"CPU\", props.device.full_name)\n",
 "\n",
 "plot = sns.barplot(benchmark_dataframe, errorbar=\"sd\")\n",
 "plot.set(ylabel=\"Sentences Per Second\", title=f\"Sentence Embeddings Benchmark\\n{cpu_name}\")\n",

notebooks/ct-segmentation-quantize/async_pipeline.py

+3 -2

@@ -24,6 +24,7 @@

 import cv2

+import openvino.properties as props
 from custom_segmentation import Model


@@ -169,15 +170,15 @@ def __init__(self, ie, model, plugin_config, device="CPU", max_num_requests=0):
         cache_path.mkdir(exist_ok=True)
         # Enable model caching for GPU devices
         if "GPU" in device and "GPU" in ie.available_devices:
-            ie.set_property(device_name="GPU", properties={"CACHE_DIR": str(cache_path)})
+            ie.set_property(device_name="GPU", properties={props.cache_dir(): str(cache_path)})

         self.model = model
         self.logger = logging.getLogger()

         self.logger.info("Loading network to {} plugin...".format(device))
         self.exec_net = ie.compile_model(self.model.net, device, plugin_config)
         if max_num_requests == 0:
-            max_num_requests = self.exec_net.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS") + 1
+            max_num_requests = self.exec_net.get_property(props.optimal_number_of_infer_requests) + 1
         self.requests = [self.exec_net.create_infer_request() for _ in range(max_num_requests)]
         self.empty_requests = deque(self.requests)
         self.completed_request_results = {}
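Condensed outside the pipeline class, the two property uses in this file amount to the following sketch ("model.xml" and the cache directory are placeholders):

import openvino as ov
import openvino.properties as props

core = ov.Core()
model = core.read_model("model.xml")  # placeholder path

# Typed key for the model cache, as in the GPU branch above
if "GPU" in core.available_devices:
    core.set_property(device_name="GPU", properties={props.cache_dir(): "model_cache"})

compiled = core.compile_model(model, "CPU")

# Let the device report how many parallel requests it wants, plus one spare
num_requests = compiled.get_property(props.optimal_number_of_infer_requests) + 1
requests = [compiled.create_infer_request() for _ in range(num_requests)]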

notebooks/dolly-2-instruction-following/dolly-2-instruction-following.ipynb

+7 -1

@@ -564,9 +564,15 @@
 ],
 "source": [
 "from pathlib import Path\n",
+"\n",
 "from transformers import AutoTokenizer\n",
 "from optimum.intel.openvino import OVModelForCausalLM\n",
 "\n",
+"import openvino.properties as props\n",
+"import openvino.properties.hint as hints\n",
+"import openvino.properties.streams as streams\n",
+"\n",
+"\n",
 "if model_to_run.value == \"INT4\":\n",
 "    model_dir = int4_model_dir\n",
 "elif model_to_run.value == \"INT8\":\n",

@@ -579,7 +585,7 @@
 "\n",
 "current_device = device.value\n",
 "\n",
-"ov_config = {\"PERFORMANCE_HINT\": \"LATENCY\", \"NUM_STREAMS\": \"1\", \"CACHE_DIR\": \"\"}\n",
+"ov_config = {hints.performance_mode(): hints.PerformanceMode.LATENCY, streams.num(): \"1\", props.cache_dir(): \"\"}\n",
 "\n",
 "ov_model = OVModelForCausalLM.from_pretrained(model_dir, device=current_device, ov_config=ov_config)"
 ]
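Since ov_config is a plain dict, the typed keys can be inspected before optimum-intel forwards them to OpenVINO. A sketch of the config built above; the printed names assume that calling a property object with no arguments resolves to its string key, which is how the diff uses them:

import openvino.properties as props
import openvino.properties.hint as hints
import openvino.properties.streams as streams

# Same settings the notebook hands to OVModelForCausalLM.from_pretrained():
# latency-first scheduling, one stream, model cache disabled.
ov_config = {
    hints.performance_mode(): hints.PerformanceMode.LATENCY,
    streams.num(): "1",
    props.cache_dir(): "",
}

# Expected: ['PERFORMANCE_HINT', 'NUM_STREAMS', 'CACHE_DIR']
print(list(ov_config))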

notebooks/florence2/gradio_helper.py

-1

@@ -63,7 +63,6 @@ def plot_bbox(image, data):


 def draw_polygons(image, prediction, fill_mask=False):
-
     draw = ImageDraw.Draw(image)
     scale = 1
     for polygons, label in zip(prediction["polygons"], prediction["labels"]):

notebooks/florence2/ov_florence2_helper.py

-1

@@ -353,7 +353,6 @@ def __init__(self, model_dir, device, ov_config=None) -> None:
         self.language_model = OVFlorence2LangModel(model_dir, self.config.text_config, device, ov_config)

     def generate(self, input_ids, inputs_embeds=None, pixel_values=None, **kwargs):
-
         if inputs_embeds is None:
             # 1. Extra the input embeddings
             if input_ids is not None:

notebooks/gpu-device/gpu-device.ipynb

+20 -12

@@ -256,9 +256,12 @@
 }
 ],
 "source": [
+"import openvino.properties as props\n",
+"\n",
+"\n",
 "device = \"GPU\"\n",
 "\n",
-"core.get_property(device, \"FULL_DEVICE_NAME\")"
+"core.get_property(device, props.device.full_name)"
 ]
 },
 {

@@ -267,7 +270,7 @@
 "id": "aac3129a-129f-49aa-aba0-71ae1e892ada",
 "metadata": {},
 "source": [
-"Each device also has a specific property called `SUPPORTED_PROPERTIES`, that enables viewing all the available properties in the device. We can check the value for each property by simply looping through the dictionary returned by `core.get_property(\"GPU\", \"SUPPORTED_PROPERTIES\")` and then querying for that property."
+"Each device also has a specific property called `SUPPORTED_PROPERTIES`, that enables viewing all the available properties in the device. We can check the value for each property by simply looping through the dictionary returned by `core.get_property(\"GPU\", props.supported_properties)` and then querying for that property."
 ]
 },
 {

@@ -321,7 +324,7 @@
 ],
 "source": [
 "print(f\"{device} SUPPORTED_PROPERTIES:\\n\")\n",
-"supported_properties = core.get_property(device, \"SUPPORTED_PROPERTIES\")\n",
+"supported_properties = core.get_property(device, props.supported_properties)\n",
 "indent = len(max(supported_properties, key=len))\n",
 "\n",
 "for property_key in supported_properties:\n",

@@ -677,7 +680,7 @@
 "core = ov.Core()\n",
 "\n",
 "# Set cache folder\n",
-"core.set_property({\"CACHE_DIR\": cache_folder})\n",
+"core.set_property({props.cache_dir(): cache_folder})\n",
 "\n",
 "# Compile the model as before\n",
 "model = core.read_model(model=model_path)\n",

@@ -717,7 +720,7 @@
 "source": [
 "start = time.time()\n",
 "core = ov.Core()\n",
-"core.set_property({\"CACHE_DIR\": \"cache\"})\n",
+"core.set_property({props.cache_dir(): \"cache\"})\n",
 "model = core.read_model(model=model_path)\n",
 "compiled_model = core.compile_model(model, device)\n",
 "print(f\"Cache enabled - compile time: {time.time() - start}s\")\n",

@@ -765,7 +768,7 @@
 "id": "7077b662-22f3-4c52-9c80-e5ac1309c482",
 "metadata": {},
 "source": [
-"To use the \"LATENCY\" performance hint, add `{\"PERFORMANCE_HINT\": \"LATENCY\"}` when compiling the model as shown below. For GPUs, this automatically minimizes the batch size and number of parallel streams such that all of the compute resources can focus on completing a single inference as fast as possible."
+"To use the \"LATENCY\" performance hint, add `{hints.performance_mode(): hints.PerformanceMode.LATENCY}` when compiling the model as shown below. For GPUs, this automatically minimizes the batch size and number of parallel streams such that all of the compute resources can focus on completing a single inference as fast as possible."
 ]
 },
 {

@@ -780,7 +783,10 @@
 },
 "outputs": [],
 "source": [
-"compiled_model = core.compile_model(model, device, {\"PERFORMANCE_HINT\": \"LATENCY\"})"
+"import openvino.properties.hint as hints\n",
+"\n",
+"\n",
+"compiled_model = core.compile_model(model, device, {hints.performance_mode(): hints.PerformanceMode.LATENCY})"
 ]
 },
 {

@@ -789,7 +795,7 @@
 "id": "06589f38-ce35-457f-8395-a4a3f6327ea0",
 "metadata": {},
 "source": [
-"To use the \"THROUGHPUT\" performance hint, add `{\"PERFORMANCE_HINT\": \"THROUGHPUT\"}` when compiling the model. For GPUs, this creates multiple processing streams to efficiently utilize all the execution cores and optimizes the batch size to fill the available memory."
+"To use the \"THROUGHPUT\" performance hint, add `{hints.performance_mode(): hints.PerformanceMode.THROUGHPUT}` when compiling the model. For GPUs, this creates multiple processing streams to efficiently utilize all the execution cores and optimizes the batch size to fill the available memory."
 ]
 },
 {

@@ -804,7 +810,7 @@
 },
 "outputs": [],
 "source": [
-"compiled_model = core.compile_model(model, device, {\"PERFORMANCE_HINT\": \"THROUGHPUT\"})"
+"compiled_model = core.compile_model(model, device, {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT})"
 ]
 },
 {

@@ -836,7 +842,9 @@
 "Note that we always need to explicitly specify the device list for MULTI to work, otherwise MULTI does not know which devices are available for inference. However, this is not the only way to use multiple devices in OpenVINO. There is another performance hint called \"CUMULATIVE_THROUGHPUT\" that works similar to MULTI, except it uses the devices automatically selected by AUTO. This way, we do not need to manually specify devices to use. Below is an example showing how to use \"CUMULATIVE_THROUGHPUT\", equivalent to the MULTI one:\n",
 "\n",
 "`\n",
-"compiled_model = core.compile_model(model=model, device_name=\"AUTO\", config={\"PERFORMANCE_HINT\": \"CUMULATIVE_THROUGHPUT\"})\n",
+"\n",
+"\n",
+"compiled_model = core.compile_model(model=model, device_name=\"AUTO\", config={hints.performance_mode(): hints.PerformanceMode.CUMULATIVE_THROUGHPUT})\n",
 "`\n",
 "\n",
 "> **Important**: **The “THROUGHPUT”, “MULTI”, and “CUMULATIVE_THROUGHPUT” modes are only applicable to asynchronous inferencing pipelines. The example at the end of this article shows how to set up an asynchronous pipeline that takes advantage of parallelism to increase throughput.** To learn more, see [Asynchronous Inferencing](https://docs.openvino.ai/2024/documentation/openvino-extensibility/openvino-plugin-library/asynch-inference-request.html) in OpenVINO as well as the [Asynchronous Inference notebook](../async-api/async-api.ipynb)."

@@ -1584,7 +1592,7 @@
 "# Read model and compile it on GPU in THROUGHPUT mode\n",
 "model = core.read_model(model=model_path)\n",
 "device_name = \"GPU\"\n",
-"compiled_model = core.compile_model(model=model, device_name=device_name, config={\"PERFORMANCE_HINT\": \"THROUGHPUT\"})\n",
+"compiled_model = core.compile_model(model=model, device_name=device_name, config={hints.performance_mode(): hints.PerformanceMode.THROUGHPUT})\n",
 "\n",
 "# Get the input and output nodes\n",
 "input_layer = compiled_model.input(0)\n",

@@ -1996,7 +2004,7 @@
 "    )\n",
 "    cv2.putText(\n",
 "        frame,\n",
-"        f\"hint {compiled_model.get_property('PERFORMANCE_HINT')}\",\n",
+"        f\"hint {compiled_model.get_property(hints.performance_mode)}\",\n",
 "        (5, 60),\n",
 "        cv2.FONT_ITALIC,\n",
 "        0.6,\n",

notebooks/grounded-segment-anything/grounded-segment-anything.ipynb

-1

@@ -966,7 +966,6 @@
 "outputs": [],
 "source": [
 "def draw_mask(mask, draw, random_color=False):\n",
-"\n",
 "    if random_color:\n",
 "        color = (\n",
 "            np.random.randint(0, 255),\n",
