Skip to content

Commit c81e765

Browse files
authored
avoid random lib usage (openvinotoolkit#2344)
1 parent 593da6b commit c81e765

File tree

17 files changed

+527
-86
lines changed

17 files changed

+527
-86
lines changed

notebooks/ct-segmentation-quantize/ct-segmentation-quantize-nncf.ipynb

+458-52
Large diffs are not rendered by default.

notebooks/explainable-ai-3-map-interpretation/explainable-ai-3-map-interpretation.ipynb

+38-3
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
{
22
"cells": [
33
{
4+
"attachments": {},
45
"cell_type": "markdown",
56
"metadata": {},
67
"source": [
@@ -62,6 +63,7 @@
6263
]
6364
},
6465
{
66+
"attachments": {},
6567
"cell_type": "markdown",
6668
"metadata": {},
6769
"source": [
@@ -93,6 +95,7 @@
9395
]
9496
},
9597
{
98+
"attachments": {},
9699
"cell_type": "markdown",
97100
"metadata": {},
98101
"source": [
@@ -107,7 +110,6 @@
107110
"outputs": [],
108111
"source": [
109112
"import os\n",
110-
"import random\n",
111113
"import zipfile\n",
112114
"from pathlib import Path\n",
113115
"\n",
@@ -131,6 +133,7 @@
131133
]
132134
},
133135
{
136+
"attachments": {},
134137
"cell_type": "markdown",
135138
"metadata": {},
136139
"source": [
@@ -139,6 +142,7 @@
139142
]
140143
},
141144
{
145+
"attachments": {},
142146
"cell_type": "markdown",
143147
"metadata": {},
144148
"source": [
@@ -211,12 +215,13 @@
211215
"print(f\"Number of images to get explanations: {len(img_files)}\")\n",
212216
"\n",
213217
"# Get a fewer subset for fast execution\n",
214-
"random.seed(42)\n",
215-
"img_files = random.sample(img_files, 1)\n",
218+
"np.random.seed(42)\n",
219+
"img_files = np.random.choice(img_files, 1)\n",
216220
"print(f\"Run explanations on fewer number of images: {len(img_files)}\")"
217221
]
218222
},
219223
{
224+
"attachments": {},
220225
"cell_type": "markdown",
221226
"metadata": {},
222227
"source": [
@@ -257,6 +262,7 @@
257262
]
258263
},
259264
{
265+
"attachments": {},
260266
"cell_type": "markdown",
261267
"metadata": {},
262268
"source": [
@@ -265,6 +271,7 @@
265271
]
266272
},
267273
{
274+
"attachments": {},
268275
"cell_type": "markdown",
269276
"metadata": {},
270277
"source": [
@@ -302,6 +309,7 @@
302309
]
303310
},
304311
{
312+
"attachments": {},
305313
"cell_type": "markdown",
306314
"metadata": {},
307315
"source": [
@@ -322,6 +330,7 @@
322330
]
323331
},
324332
{
333+
"attachments": {},
325334
"cell_type": "markdown",
326335
"metadata": {},
327336
"source": [
@@ -330,6 +339,7 @@
330339
]
331340
},
332341
{
342+
"attachments": {},
333343
"cell_type": "markdown",
334344
"metadata": {},
335345
"source": [
@@ -377,6 +387,7 @@
377387
]
378388
},
379389
{
390+
"attachments": {},
380391
"cell_type": "markdown",
381392
"metadata": {},
382393
"source": [
@@ -385,6 +396,7 @@
385396
]
386397
},
387398
{
399+
"attachments": {},
388400
"cell_type": "markdown",
389401
"metadata": {},
390402
"source": [
@@ -393,6 +405,7 @@
393405
]
394406
},
395407
{
408+
"attachments": {},
396409
"cell_type": "markdown",
397410
"metadata": {},
398411
"source": [
@@ -432,6 +445,7 @@
432445
]
433446
},
434447
{
448+
"attachments": {},
435449
"cell_type": "markdown",
436450
"metadata": {},
437451
"source": [
@@ -440,6 +454,7 @@
440454
]
441455
},
442456
{
457+
"attachments": {},
443458
"cell_type": "markdown",
444459
"metadata": {},
445460
"source": [
@@ -490,6 +505,7 @@
490505
]
491506
},
492507
{
508+
"attachments": {},
493509
"cell_type": "markdown",
494510
"metadata": {},
495511
"source": [
@@ -498,6 +514,7 @@
498514
]
499515
},
500516
{
517+
"attachments": {},
501518
"cell_type": "markdown",
502519
"metadata": {},
503520
"source": [
@@ -527,6 +544,7 @@
527544
]
528545
},
529546
{
547+
"attachments": {},
530548
"cell_type": "markdown",
531549
"metadata": {},
532550
"source": [
@@ -554,6 +572,7 @@
554572
]
555573
},
556574
{
575+
"attachments": {},
557576
"cell_type": "markdown",
558577
"metadata": {},
559578
"source": [
@@ -562,6 +581,7 @@
562581
]
563582
},
564583
{
584+
"attachments": {},
565585
"cell_type": "markdown",
566586
"metadata": {},
567587
"source": [
@@ -668,6 +688,7 @@
668688
]
669689
},
670690
{
691+
"attachments": {},
671692
"cell_type": "markdown",
672693
"metadata": {},
673694
"source": [
@@ -796,6 +817,7 @@
796817
]
797818
},
798819
{
820+
"attachments": {},
799821
"cell_type": "markdown",
800822
"metadata": {},
801823
"source": [
@@ -820,6 +842,7 @@
820842
]
821843
},
822844
{
845+
"attachments": {},
823846
"cell_type": "markdown",
824847
"metadata": {},
825848
"source": [
@@ -828,6 +851,7 @@
828851
]
829852
},
830853
{
854+
"attachments": {},
831855
"cell_type": "markdown",
832856
"metadata": {},
833857
"source": [
@@ -871,6 +895,7 @@
871895
]
872896
},
873897
{
898+
"attachments": {},
874899
"cell_type": "markdown",
875900
"metadata": {},
876901
"source": [
@@ -879,6 +904,7 @@
879904
]
880905
},
881906
{
907+
"attachments": {},
882908
"cell_type": "markdown",
883909
"metadata": {},
884910
"source": [
@@ -892,6 +918,7 @@
892918
]
893919
},
894920
{
921+
"attachments": {},
895922
"cell_type": "markdown",
896923
"metadata": {},
897924
"source": [
@@ -900,6 +927,7 @@
900927
]
901928
},
902929
{
930+
"attachments": {},
903931
"cell_type": "markdown",
904932
"metadata": {},
905933
"source": [
@@ -928,6 +956,7 @@
928956
]
929957
},
930958
{
959+
"attachments": {},
931960
"cell_type": "markdown",
932961
"metadata": {},
933962
"source": [
@@ -939,6 +968,7 @@
939968
]
940969
},
941970
{
971+
"attachments": {},
942972
"cell_type": "markdown",
943973
"metadata": {},
944974
"source": [
@@ -967,13 +997,15 @@
967997
]
968998
},
969999
{
1000+
"attachments": {},
9701001
"cell_type": "markdown",
9711002
"metadata": {},
9721003
"source": [
9731004
"`True positive low confidence` basically means that key features are not well available or are transformed. From the saliency maps, we see that the model is paying attention to the whole object, trying to make a decision mostly based on high-level features."
9741005
]
9751006
},
9761007
{
1008+
"attachments": {},
9771009
"cell_type": "markdown",
9781010
"metadata": {},
9791011
"source": [
@@ -1002,6 +1034,7 @@
10021034
]
10031035
},
10041036
{
1037+
"attachments": {},
10051038
"cell_type": "markdown",
10061039
"metadata": {},
10071040
"source": [
@@ -1021,6 +1054,7 @@
10211054
]
10221055
},
10231056
{
1057+
"attachments": {},
10241058
"cell_type": "markdown",
10251059
"metadata": {},
10261060
"source": [
@@ -1049,6 +1083,7 @@
10491083
]
10501084
},
10511085
{
1086+
"attachments": {},
10521087
"cell_type": "markdown",
10531088
"metadata": {},
10541089
"source": [

notebooks/florence2/gradio_helper.py

+9-4
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import io
22
import copy
3-
import random
43
import requests
54
from pathlib import Path
65

@@ -68,8 +67,13 @@ def draw_polygons(image, prediction, fill_mask=False):
6867
draw = ImageDraw.Draw(image)
6968
scale = 1
7069
for polygons, label in zip(prediction["polygons"], prediction["labels"]):
71-
color = random.choice(colormap)
72-
fill_color = random.choice(colormap) if fill_mask else None
70+
color_id = np.random.choice(len(colormap))
71+
color = colormap[color_id]
72+
if fill_mask:
73+
fill_color_id = np.random.choice(len(colormap))
74+
fill_color = colormap[fill_color_id]
75+
else:
76+
fill_color = None
7377
for _polygon in polygons:
7478
_polygon = np.array(_polygon).reshape(-1, 2)
7579
if len(_polygon) < 3:
@@ -96,7 +100,8 @@ def draw_ocr_bboxes(image, prediction):
96100
draw = ImageDraw.Draw(image)
97101
bboxes, labels = prediction["quad_boxes"], prediction["labels"]
98102
for box, label in zip(bboxes, labels):
99-
color = random.choice(colormap)
103+
color_id = np.random.choice(len(colormap))
104+
color = colormap[color_id]
100105
new_box = (np.array(box) * scale).tolist()
101106
draw.polygon(new_box, width=3, outline=color)
102107
draw.text((new_box[0] + 8, new_box[1] + 2), "{}".format(label), align="right", fill=color)

notebooks/flux.1-image-generation/gradio_helper.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import gradio as gr
22
import numpy as np
3-
import random
43
import torch
54

65
MAX_SEED = np.iinfo(np.int32).max
@@ -23,7 +22,7 @@
2322
def make_demo(ov_pipe):
2423
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidance_scale=0, progress=gr.Progress(track_tqdm=True)):
2524
if randomize_seed:
26-
seed = random.randint(0, MAX_SEED)
25+
seed = np.random.randint(0, MAX_SEED)
2726
generator = torch.Generator().manual_seed(seed)
2827
image = ov_pipe(
2928
prompt=prompt, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale

notebooks/grounded-segment-anything/grounded-segment-anything.ipynb

+3-4
Original file line numberDiff line numberDiff line change
@@ -966,13 +966,12 @@
966966
"outputs": [],
967967
"source": [
968968
"def draw_mask(mask, draw, random_color=False):\n",
969-
" import random\n",
970969
"\n",
971970
" if random_color:\n",
972971
" color = (\n",
973-
" random.randint(0, 255),\n",
974-
" random.randint(0, 255),\n",
975-
" random.randint(0, 255),\n",
972+
" np.random.randint(0, 255),\n",
973+
" np.random.randint(0, 255),\n",
974+
" np.random.randint(0, 255),\n",
976975
" 153,\n",
977976
" )\n",
978977
" else:\n",

notebooks/instant-id/gradio_helper.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
from pathlib import Path
22
from typing import Callable
33
import gradio as gr
4-
import random
54
import numpy as np
65
from diffusers.utils import load_image
76
from style_template import styles
@@ -32,7 +31,7 @@
3231

3332
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
3433
if randomize_seed:
35-
seed = random.randint(0, MAX_SEED)
34+
seed = np.random.randint(0, MAX_SEED)
3635
return seed
3736

3837

notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -986,7 +986,7 @@
986986
" batch_size = input_ids.shape[0]\n",
987987
" if not self.stateful:\n",
988988
" for input_name in self.key_value_input_names:\n",
989-
" model_inputs = self.modeget_anyres_image_grid_shapel.input(input_name)\n",
989+
" model_inputs = self.model.input(input_name)\n",
990990
" shape = model_inputs.get_partial_shape()\n",
991991
" shape[0] = batch_size\n",
992992
" if shape[2].is_dynamic:\n",

0 commit comments

Comments (0)