@@ -174,33 +174,34 @@ def visualize(
         # Convert back to dict
         return self._update_explanation_with_processed_sal_map(explanation, saliency_map_np, indices_to_return)
 
-    @staticmethod
     def _put_classification_info(
+        self,
         saliency_map_np: np.ndarray,
         indices: List[int],
         label_names: List[str] | None,
         predictions: Dict[int, Prediction] | None,
     ) -> None:
-        corner_location = 3, 17
+        offset = 3
         for smap, target_index in zip(range(len(saliency_map_np)), indices):
             label = label_names[target_index] if label_names else str(target_index)
             if predictions and target_index in predictions:
                 score = predictions[target_index].score
                 if score:
                     label = f"{label}|{score:.2f}"
 
+            font_scale, text_height = self._fit_text_to_image(label, offset, saliency_map_np[smap].shape[1])
             cv2.putText(
                 saliency_map_np[smap],
                 label,
-                org=corner_location,
-                fontFace=1,
-                fontScale=1.3,
+                org=(offset, text_height + offset),
+                fontFace=2,
+                fontScale=font_scale,
                 color=(255, 0, 0),
-                thickness=2,
+                thickness=1,
             )
 
-    @staticmethod
     def _put_detection_info(
+        self,
         saliency_map_np: np.ndarray,
         indices: List[int],
         label_names: List[str] | None,
@@ -209,6 +210,7 @@ def _put_detection_info(
         if not predictions:
             return
 
+        offset = 7
         for smap, target_index in zip(range(len(saliency_map_np)), indices):
             saliency_map = saliency_map_np[smap]
             label_index = predictions[target_index].label
@@ -220,17 +222,40 @@ def _put_detection_info(
 
             label = label_names[label_index] if label_names else label_index
             label_score = f"{label}|{score:.2f}"
-            box_location = int(x1), int(y1 - 5)
+
+            font_scale, _ = self._fit_text_to_image(label_score, x1, saliency_map.shape[1])
+            box_location = x1, y1 - offset
             cv2.putText(
                 saliency_map,
                 label_score,
                 org=box_location,
-                fontFace=1,
-                fontScale=1.3,
+                fontFace=2,
+                fontScale=font_scale,
                 color=(255, 0, 0),
-                thickness=2,
+                thickness=1,
             )
 
+    @staticmethod
+    def _fit_text_to_image(
+        text: str,
+        x_start: int,
+        image_width: int,
+        font_scale: float = 1.0,
+        thickness: int = 1,
+    ) -> Tuple[float, int]:
+        font_face = 2
+        max_width = image_width - 5
+        while True:
+            text_size, _ = cv2.getTextSize(text, font_face, font_scale, thickness)
+            text_width, text_height = text_size
+
+            if x_start + text_width <= max_width:
+                return font_scale, text_height
+
+            font_scale -= 0.1
+            if abs(font_scale - 0.1) < 0.001:
+                return font_scale, text_height
+
     @staticmethod
     def _apply_scaling(explanation: Explanation, saliency_map_np: np.ndarray) -> np.ndarray:
         if explanation.layout not in GRAY_LAYOUTS:
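For reference, a minimal standalone sketch of the text-fitting idea introduced by this patch (not part of the diff; the 224x224 image, label, and offset below are made-up illustration values, and fontFace=2 corresponds to cv2.FONT_HERSHEY_DUPLEX):

import cv2
import numpy as np

# Hypothetical 224x224 BGR saliency map and an illustrative label.
saliency_map = np.zeros((224, 224, 3), dtype=np.uint8)
label = "some_long_class_name|0.87"
offset = 3

# Shrink the font scale until the rendered label fits inside the image width,
# mirroring the behavior of the new _fit_text_to_image helper.
font_scale, thickness = 1.0, 1
while True:
    (text_width, text_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_DUPLEX, font_scale, thickness)
    if offset + text_width <= saliency_map.shape[1] - 5 or font_scale <= 0.1:
        break
    font_scale -= 0.1

# Draw the label at the fitted scale, just below the top-left corner.
cv2.putText(
    saliency_map,
    label,
    org=(offset, text_height + offset),
    fontFace=cv2.FONT_HERSHEY_DUPLEX,
    fontScale=font_scale,
    color=(255, 0, 0),
    thickness=thickness,
)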