Commit e31ac56

update optimum version (openvinotoolkit#1333)
* update optimum version
* Update notebooks/240-dolly-2-instruction-following/240-dolly-2-instruction-following.ipynb
1 parent 6037580 commit e31ac56

File tree: 7 files changed, +870 −2443 lines


notebooks/240-dolly-2-instruction-following/240-dolly-2-instruction-following.ipynb (+47 −77)
@@ -64,22 +64,9 @@
    "execution_count": 1,
    "id": "4421fc85-bed6-4a62-b8fa-19c7ba474891",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.1.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.2\u001b[0m\n",
-      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
-      "\n",
-      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.1.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.2\u001b[0m\n",
-      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "%pip install -q \"diffusers>=0.16.1\" \"transformers>=4.28.0\" \"openvino-dev>=2023.1.0\"\n",
+    "%pip install -q \"diffusers>=0.16.1\" \"transformers>=4.28.0\" \"openvino>=2023.1.0\"\n",
     "%pip install -q \"git+https://github.com/huggingface/optimum-intel.git\" datasets onnx onnxruntime gradio "
    ]
   },
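Note: the requirements cell now installs the `openvino` package in place of the development-oriented `openvino-dev`. A minimal post-install sanity check, assuming the cell above ran cleanly; this snippet is not part of the notebook itself, but `Core` is the same entry point the device-selection cell below relies on:

    # Quick import check after installation; prints the devices the
    # device dropdown below will offer (plus "AUTO").
    from openvino.runtime import Core

    core = Core()
    print(core.available_devices)  # e.g. ['CPU'] on a machine without a GPU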
@@ -102,12 +89,12 @@
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "5bc9f8fc615a4cf7af5cb987afd0211d",
+      "model_id": "5fe94d76fb364dd4ae8e6e39abe65cd7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
-      "Dropdown(description='Device:', index=2, options=('CPU', 'GPU', 'AUTO'), value='AUTO')"
+      "Dropdown(description='Device:', options=('CPU', 'GPU', 'AUTO'), value='CPU')"
      ]
     },
     "execution_count": 2,
@@ -123,7 +110,7 @@
     "\n",
     "device = widgets.Dropdown(\n",
     "    options=core.available_devices + [\"AUTO\"],\n",
-    "    value='AUTO',\n",
+    "    value='CPU',\n",
     "    description='Device:',\n",
     "    disabled=False,\n",
     ")\n",
@@ -162,16 +149,6 @@
    "id": "91f42296-627d-44ff-a1cb-936bb6f87992",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "2023-07-17 14:47:00.308996: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
-      "2023-07-17 14:47:00.348466: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
-      "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
-      "2023-07-17 14:47:01.039895: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
-     ]
-    },
     {
      "name": "stdout",
      "output_type": "stream",
@@ -184,53 +161,14 @@
      "output_type": "stream",
      "text": [
       "No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'\n",
-      "comet_ml is installed but `COMET_API_KEY` is not set.\n",
-      "The argument `from_transformers` is deprecated, and will be removed in optimum 2.0. Use `export` instead\n",
-      "Framework not specified. Using pt to export to ONNX.\n",
-      "Using framework PyTorch: 1.13.1+cpu\n",
-      "Overriding 1 configuration item(s)\n",
-      "\t- use_cache -> True\n",
-      "/home/ea/work/notebooks_convert/notebooks_conv_env/lib/python3.8/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py:504: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
-      " assert batch_size > 0, \"batch_size has to be defined and > 0\"\n",
-      "/home/ea/work/notebooks_convert/notebooks_conv_env/lib/python3.8/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py:270: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
-      " if seq_len > self.max_seq_len_cached:\n",
-      "/home/ea/work/notebooks_convert/notebooks_conv_env/lib/python3.8/site-packages/nncf/torch/dynamic_graph/wrappers.py:74: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
-      " op1 = operator(*args, **kwargs)\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "In-place op on output of tensor.shape. See https://pytorch.org/docs/master/onnx.html#avoid-inplace-operations-when-using-tensor-shape-in-tracing-mode\n",
-      "Saving external data to one file...\n",
+      "2023-09-19 18:04:40.914286: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
+      "2023-09-19 18:04:40.953392: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+      "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+      "2023-09-19 18:04:41.581851: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+      "/home/ea/work/ov_venv/lib/python3.8/site-packages/transformers/deepspeed.py:23: FutureWarning: transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations\n",
+      " warnings.warn(\n",
       "Compiling the model...\n",
-      "Set CACHE_DIR to /tmp/tmpndw8_20n/model_cache\n"
+      "Set CACHE_DIR to dolly-v2-3b/model_cache\n"
      ]
     }
    ],
@@ -249,7 +187,8 @@
     "if model_path.exists():\n",
     "    ov_model = OVModelForCausalLM.from_pretrained(model_path, device=current_device)\n",
     "else:\n",
-    "    ov_model = OVModelForCausalLM.from_pretrained(model_id, device=current_device, from_transformers=True)\n",
+    "    ov_model = OVModelForCausalLM.from_pretrained(model_id, device=current_device, export=True)\n",
+    "    ov_model.half()\n",
     "    ov_model.save_pretrained(model_path)"
    ]
   },
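Note: this hunk is the substance of the optimum version update. The `from_transformers=True` flag, which the deleted output above reports as deprecated and slated for removal in optimum 2.0, becomes `export=True`, and the model is cast to FP16 with `half()` before saving. A minimal standalone sketch of the flow, assuming optimum-intel is installed as in the pip cell; the `databricks/dolly-v2-3b` model id and local path mirror the notebook, and `device="CPU"` stands in for the notebook's `current_device`:

    from pathlib import Path
    from optimum.intel import OVModelForCausalLM

    model_id = "databricks/dolly-v2-3b"
    model_path = Path("dolly-v2-3b")

    if model_path.exists():
        # Reload a previously exported OpenVINO IR model from disk.
        ov_model = OVModelForCausalLM.from_pretrained(model_path, device="CPU")
    else:
        # export=True replaces the deprecated from_transformers=True flag.
        ov_model = OVModelForCausalLM.from_pretrained(model_id, device="CPU", export=True)
        ov_model.half()  # store weights in FP16, roughly halving the IR size on disk
        ov_model.save_pretrained(model_path)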
@@ -579,7 +518,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "/tmp/ipykernel_1272681/896135151.py:57: GradioDeprecationWarning: The `enable_queue` parameter has been deprecated. Please use the `.queue()` method instead.\n",
+     "/tmp/ipykernel_177871/2332051390.py:57: GradioDeprecationWarning: The `enable_queue` parameter has been deprecated. Please use the `.queue()` method instead.\n",
      " demo.launch(enable_queue=True, share=False, height=800)\n"
     ]
    },
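Note: only the kernel/session path changes in this hunk; the demo is still launched with the deprecated `enable_queue=True`, and the warning text carries its own fix. A hedged sketch of the replacement call, assuming `demo` is the Gradio interface built earlier in the notebook:

    # .queue() replaces the deprecated enable_queue=True launch flag.
    demo.queue()
    demo.launch(share=False, height=800)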
@@ -693,7 +632,38 @@
   },
   "widgets": {
    "application/vnd.jupyter.widget-state+json": {
-    "state": {},
+    "state": {
+     "0685b60213644b5faa887230d63d2f9d": {
+      "model_module": "@jupyter-widgets/controls",
+      "model_module_version": "2.0.0",
+      "model_name": "DescriptionStyleModel",
+      "state": {
+       "description_width": ""
+      }
+     },
+     "5fe94d76fb364dd4ae8e6e39abe65cd7": {
+      "model_module": "@jupyter-widgets/controls",
+      "model_module_version": "2.0.0",
+      "model_name": "DropdownModel",
+      "state": {
+       "_options_labels": [
+        "CPU",
+        "GPU",
+        "AUTO"
+       ],
+       "description": "Device:",
+       "index": 0,
+       "layout": "IPY_MODEL_7cd80d2c20ad4e7384ee8c822ff61d04",
+       "style": "IPY_MODEL_0685b60213644b5faa887230d63d2f9d"
+      }
+     },
+     "7cd80d2c20ad4e7384ee8c822ff61d04": {
+      "model_module": "@jupyter-widgets/base",
+      "model_module_version": "2.0.0",
+      "model_name": "LayoutModel",
+      "state": {}
+     }
+    },
     "version_major": 2,
     "version_minor": 0
    }
