     StableDiffusionXLImg2ImgPipeline,
     StableDiffusionXLInpaintPipeline,
     ControlNetModel,
+    StableDiffusionLatentUpscalePipeline,
     DDIMScheduler,
     DPMSolverMultistepScheduler,
     EulerAncestralDiscreteScheduler,
@@ -535,6 +536,23 @@ class NoWatermark:
     def apply_watermark(self, img):
         return img
 
+
+def get_diffusers_upscaler(upscaler: str):
+    torch._dynamo.reset()
+    openvino_clear_caches()
+    model_name = "stabilityai/sd-x2-latent-upscaler"
+    print("OpenVINO Script: loading upscaling model: " + model_name)
+    sd_model = StableDiffusionLatentUpscalePipeline.from_pretrained(model_name, torch_dtype=torch.float32)
+    sd_model.safety_checker = None
+    sd_model.cond_stage_key = functools.partial(cond_stage_key, shared.sd_model)
+    sd_model.unet = torch.compile(sd_model.unet, backend="openvino")
+    sd_model.vae.decode = torch.compile(sd_model.vae.decode, backend="openvino")
+    shared.sd_diffusers_model = sd_model
+    del sd_model
+
+    return shared.sd_diffusers_model
+
+
 def get_diffusers_sd_model(model_config, vae_ckpt, sampler_name, enable_caching, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac):
     if (model_state.recompile == 1):
         model_state.partition_id = 0
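The new `get_diffusers_upscaler()` helper loads the Hugging Face `stabilityai/sd-x2-latent-upscaler` pipeline and routes its UNet and VAE decode path through `torch.compile` with the OpenVINO backend. As a rough standalone sketch of the same setup outside the webui (assuming `diffusers` is installed and an "openvino" `torch.compile` backend is registered; the file name and prompt below are placeholders):

```python
# Hedged standalone sketch of the upscaler setup added above; not part of the webui script.
# Assumes an "openvino" torch.compile backend is available (the webui script registers its
# own; outside it, OpenVINO's torch.compile integration would have to provide one).
import torch
from PIL import Image
from diffusers import StableDiffusionLatentUpscalePipeline

pipe = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float32
)
# Same compile targets as in the diff: the UNet and the VAE decode path.
pipe.unet = torch.compile(pipe.unet, backend="openvino")
pipe.vae.decode = torch.compile(pipe.vae.decode, backend="openvino")

low_res = Image.open("low_res.png").convert("RGB")  # placeholder input image
up = pipe(prompt="", image=low_res, num_inference_steps=10, guidance_scale=0).images[0]
up.save("upscaled_2x.png")
```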
@@ -770,7 +788,8 @@ def init_new(self, all_prompts, all_seeds, all_subseeds):
         else:
             raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
 
-def process_images_openvino(p: StableDiffusionProcessing, model_config, vae_ckpt, sampler_name, enable_caching, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac) -> Processed:
+def process_images_openvino(p: StableDiffusionProcessing, model_config, vae_ckpt, sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac) -> Processed:
+    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
 
     """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
     if (mode == 0 and p.enable_hr):
@@ -1092,6 +1111,23 @@ def callback(iter, t, latents):
 
     devices.torch_gc()
 
+    # High resolution mode
+    if override_hires:
+        if upscaler == "Latent":
+            model_state.mode = -1
+            shared.sd_diffusers_model = get_diffusers_upscaler(upscaler)
+            img_idx = slice(len(output_images)) if p.batch_size == 1 else slice(1, len(output_images))
+            output_images[img_idx] = shared.sd_diffusers_model(
+                image=output_images[img_idx],
+                prompt=p.prompts,
+                negative_prompt=p.negative_prompts,
+                num_inference_steps=hires_steps,
+                guidance_scale=p.cfg_scale,
+                generator=generator,
+                callback=callback,
+                callback_steps=1,
+            ).images
+
     res = Processed(
         p,
         images_list=output_images,
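The `img_idx` slice decides which entries of `output_images` go to the upscaler: with a batch size of 1 every image is upscaled, otherwise the first entry is skipped (it appears to hold the combined grid image rather than an individual result). A tiny illustration of the slice behaviour, with placeholder strings standing in for the PIL images:

```python
# Illustration only: placeholder strings stand in for PIL images.
output_images = ["grid", "img_1", "img_2"]
batch_size = 2
img_idx = slice(len(output_images)) if batch_size == 1 else slice(1, len(output_images))
print(output_images[img_idx])  # ['img_1', 'img_2'] -- the leading entry is left untouched
```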
@@ -1102,7 +1138,8 @@ def callback(iter, t, latents):
         index_of_first_image=index_of_first_image,
         infotexts=infotexts,
     )
-
+    if override_hires:
+        res.info = res.info + f", Hires upscaler: {upscaler}, Denoising strength: {d_strength}"
     res.info = res.info + ", Warm up time: " + str(round(warmup_duration, 2)) + " secs "
 
     if (generation_rate >= 1.0):
@@ -1116,6 +1153,9 @@ def callback(iter, t, latents):
 
     return res
 
+def on_change(mode):
+    return gr.update(visible=mode)
+
 class Script(scripts.Script):
     def title(self):
         return "Accelerate with OpenVINO"
@@ -1170,6 +1210,12 @@ def get_refiner_list():
         override_sampler = gr.Checkbox(label="Override the sampling selection from the main UI (Recommended as only below sampling methods have been validated for OpenVINO)", value=True)
         sampler_name = gr.Radio(label="Select a sampling method", choices=["Euler a", "Euler", "LMS", "Heun", "DPM++ 2M", "LMS Karras", "DPM++ 2M Karras", "DDIM", "PLMS"], value="Euler a")
         enable_caching = gr.Checkbox(label="Cache the compiled models on disk for faster model load in subsequent launches (Recommended)", value=True, elem_id=self.elem_id("enable_caching"))
+        override_hires = gr.Checkbox(label="Override the Hires.fix selection from the main UI (Recommended as only below upscalers have been validated for OpenVINO)", value=False, visible=self.is_txt2img)
+        with gr.Group(visible=False) as hires:
+            with gr.Row():
+                upscaler = gr.Dropdown(label="Upscaler", choices=["Latent"], value="Latent")
+                hires_steps = gr.Slider(1, 150, value=10, step=1, label="Steps")
+                d_strength = gr.Slider(0, 1, value=0.5, step=0.01, label="Strength")
         warmup_status = gr.Textbox(label="Device", interactive=False, visible=False)
         vae_status = gr.Textbox(label="VAE", interactive=False, visible=False)
         gr.Markdown(
@@ -1184,6 +1230,8 @@ def get_refiner_list():
         So it's normal for the first inference after a settings change to be slower, while subsequent inferences use the optimized compiled model and run faster.
         """)
 
+        override_hires.change(on_change, override_hires, hires)
+
         def device_change(choice):
             if (model_state.device == choice):
                 return gr.update(value="Device selected is " + choice, visible=True)
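The `on_change` helper plus the `override_hires.change(...)` wiring is the usual Gradio pattern for showing or hiding a group of controls from a checkbox. A minimal, self-contained Gradio 3.x sketch of the same pattern (a hypothetical demo, not part of the script):

```python
# Minimal Gradio 3.x sketch of the checkbox-driven visibility toggle used above.
import gradio as gr

def on_change(mode):
    # Returning gr.update(visible=...) toggles the bound output component.
    return gr.update(visible=mode)

with gr.Blocks() as demo:
    override_hires = gr.Checkbox(label="Override Hires.fix", value=False)
    with gr.Group(visible=False) as hires:
        upscaler = gr.Dropdown(label="Upscaler", choices=["Latent"], value="Latent")
        hires_steps = gr.Slider(1, 150, value=10, step=1, label="Steps")
        d_strength = gr.Slider(0, 1, value=0.5, step=0.01, label="Strength")
    override_hires.change(on_change, override_hires, hires)

demo.launch()
```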
@@ -1206,9 +1254,9 @@ def refiner_ckpt_change(choice):
             else:
                 model_state.refiner_ckpt = choice
         refiner_ckpt.change(refiner_ckpt_change, refiner_ckpt)
-        return [model_config, vae_ckpt, openvino_device, override_sampler, sampler_name, enable_caching, is_xl_ckpt, refiner_ckpt, refiner_frac]
+        return [model_config, vae_ckpt, openvino_device, override_sampler, sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, is_xl_ckpt, refiner_ckpt, refiner_frac]
 
-    def run(self, p, model_config, vae_ckpt, openvino_device, override_sampler, sampler_name, enable_caching, is_xl_ckpt, refiner_ckpt, refiner_frac):
+    def run(self, p, model_config, vae_ckpt, openvino_device, override_sampler, sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, is_xl_ckpt, refiner_ckpt, refiner_frac):
         os.environ["OPENVINO_TORCH_BACKEND_DEVICE"] = str(openvino_device)
 
         if enable_caching:
@@ -1225,14 +1273,12 @@ def run(self, p, model_config, vae_ckpt, openvino_device, override_sampler, samp
         mode = 0
         if self.is_txt2img:
             mode = 0
-            processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)
+            processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)
         else:
             if p.image_mask is None:
                 mode = 1
             else:
                 mode = 2
             p.init = functools.partial(init_new, p)
-            processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)
+            processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)
         return processed
-
-