Commit 7ae9763
Fix KeyError on guidance_scale (openvinotoolkit#1165)
Fix the KeyError raised when running stable-diffusion with a prompt entry that omits guidance_scale:

[ INFO ] Traceback (most recent call last):
  File "/home/guozhong/openvino.genai_2024_5/openvino.genai/tools/llm_bench/benchmark.py", line 202, in main
    iter_data_list, pretrain_time, iter_timestamp = CASE_TO_BENCH[model_args['use_case']](
  File "/home/guozhong/openvino.genai_2024_5/openvino.genai/tools/llm_bench/task/image_generation.py", line 198, in run_image_generation_benchmark
    image_gen_fn(image_param, num, prompt_idx_list[image_id], pipe, args, iter_data_list, proc_id, mem_consumption)
  File "/home/guozhong/openvino.genai_2024_5/openvino.genai/tools/llm_bench/task/image_generation.py", line 50, in run_image_generation
    f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}'
KeyError: 'guidance_scale'

---------

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
1 parent 4051f45 commit 7ae9763
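The root cause is plain dict indexing inside an f-string: when a prompt entry omits guidance_scale, input_args has no such key and the lookup raises before anything is logged. A minimal standalone repro of the pre-fix behavior (the dict literal is illustrative):

# Pre-fix failure mode: indexing a dict key that the prompt entry
# never supplied raises KeyError while the f-string is evaluated.
input_args = {"num_inference_steps": 30, "width": 256, "height": 256}

try:
    print(f'steps={input_args["num_inference_steps"]}, '
          f'guidance_scale={input_args["guidance_scale"]}')
except KeyError as err:
    print(f"KeyError: {err}")  # KeyError: 'guidance_scale'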

3 files changed: +17 −13 lines changed

tools/llm_bench/llm_bench_utils/gen_output_data.py (+4 −4)

@@ -25,10 +25,10 @@ def gen_iterate_data(
     iter_data['generation_time'] = gen_time
     iter_data['latency'] = latency
     iter_data['result_md5'] = res_md5
-    iter_data['first_token_latency'] = ''
-    iter_data['other_tokens_avg_latency'] = ''
-    iter_data['first_token_infer_latency'] = ''
-    iter_data['other_tokens_infer_avg_latency'] = ''
+    iter_data['first_token_latency'] = -1
+    iter_data['other_tokens_avg_latency'] = -1
+    iter_data['first_token_infer_latency'] = -1
+    iter_data['other_tokens_infer_avg_latency'] = -1
     iter_data['max_rss_mem_consumption'] = max_rss_mem
     iter_data['max_shared_mem_consumption'] = max_shared_mem
     iter_data['max_uss_mem_consumption'] = max_uss_mem
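Switching the unset latency fields from '' to -1 keeps the columns numeric, so downstream consumers can filter sentinels arithmetically instead of special-casing empty strings. A hypothetical consumer sketch (not code from this repo):

# Hypothetical consumer: with -1 sentinels the field stays numeric,
# so unmeasured iterations can be filtered without type checks.
iter_data_list = [
    {"first_token_latency": -1},    # warm-up / not measured
    {"first_token_latency": 52.4},
    {"first_token_latency": 48.9},
]

measured = [d["first_token_latency"] for d in iter_data_list
            if d["first_token_latency"] != -1]
avg = sum(measured) / len(measured) if measured else -1
print(f"avg first-token latency: {avg:.1f} ms")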
tools/llm_bench prompt file (JSONL) (+2 −1)

@@ -1 +1,2 @@
-{"steps":"30", "width":"256", "height":"256", "guidance_scale":"1.0", "prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"}
+{"steps":"30", "width":"256", "height":"256", "guidance_scale":"1.0", "prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"}
+{"prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"}

tools/llm_bench/task/image_generation.py (+11 −8)

@@ -45,10 +45,11 @@ def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list,
     set_seed(args['seed'])
     input_text = image_param['prompt']
     input_args = collects_input_args(image_param, args['model_type'], args['model_name'])
-    log.info(
-        f"[{'warm-up' if num == 0 else num}][P{image_id}] Input params: Batch_size={args['batch_size']}, "
-        f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}'
-    )
+    out_str = f"Input params: Batch_size={args['batch_size']}, " \
+              f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
+    if 'guidance_scale' in input_args:
+        out_str += f", guidance_scale={input_args['guidance_scale']}"
+    log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}")

     result_md5_list = []
     max_rss_mem_consumption = ''

@@ -107,10 +108,12 @@ def run_image_generation_genai(image_param, num, image_id, pipe, args, iter_data
     set_seed(args['seed'])
     input_text = image_param['prompt']
     input_args = collects_input_args(image_param, args['model_type'], args['model_name'])
-    log.info(
-        f"[{'warm-up' if num == 0 else num}][P{image_id}] Input params: Batch_size={args['batch_size']}, "
-        f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}'
-    )
+    out_str = f"Input params: Batch_size={args['batch_size']}, " \
+              f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
+    if 'guidance_scale' in input_args:
+        out_str += f", guidance_scale={input_args['guidance_scale']}"
+    log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}")
+
     result_md5_list = []
     max_rss_mem_consumption = ''
     max_uss_mem_consumption = ''
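The guarded construction can be exercised on its own; below is a minimal standalone sketch mirroring the patched code path (the helper log_input_params is illustrative, not part of the repo):

import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

def log_input_params(num, image_id, batch_size, input_args):
    # Build the mandatory part first, then append guidance_scale only
    # when the prompt entry actually provided it (the KeyError fix).
    out_str = f"Input params: Batch_size={batch_size}, " \
              f"steps={input_args['num_inference_steps']}, " \
              f"width={input_args['width']}, height={input_args['height']}"
    if 'guidance_scale' in input_args:
        out_str += f", guidance_scale={input_args['guidance_scale']}"
    log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}")

# A prompt entry without guidance_scale no longer raises:
log_input_params(0, 0, 1, {"num_inference_steps": 30, "width": 256, "height": 256})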
