Skip to content

Commit 99f9a32

Browse files
Merge pull request #331 from yatarkan/yt/sd-to-optimum-cli
Move from image_generation python conversion scripts to optimum-cli
2 parents bcfc124 + 2865224 commit 99f9a32

File tree

7 files changed

+270
-159
lines changed

7 files changed

+270
-159
lines changed
+73-42
Original file line numberDiff line numberDiff line change
@@ -1,84 +1,115 @@
11
name: stable_diffusion_1_5_cpp
2+
23
on:
34
pull_request:
45
paths:
56
- image_generation/stable_diffusion_1_5/cpp/**
67
- image_generation/common/**
78
- .github/workflows/stable_diffusion_1_5_cpp.yml
89
- thirdparty/openvino_tokenizers
10+
11+
env:
12+
working_directory: "./image_generation/stable_diffusion_1_5/cpp/"
13+
914
concurrency:
1015
group: ${{ github.workflow }}-${{ github.ref }}
1116
cancel-in-progress: true
17+
1218
jobs:
1319
stable_diffusion_1_5_cpp-linux:
1420
runs-on: ubuntu-20.04-8-cores
21+
defaults:
22+
run:
23+
# Do not ignore bash profile files. From:
24+
# https://github.com/marketplace/actions/setup-miniconda#important
25+
shell: bash -l {0}
1526
steps:
1627
- uses: actions/checkout@v4
1728
with:
1829
submodules: recursive
19-
- uses: actions/setup-python@v4
30+
31+
- name: Setup conda
32+
uses: conda-incubator/setup-miniconda@v3
2033
with:
21-
python-version: 3.8
22-
- name: Install OpenVINO
34+
miniconda-version: "latest"
35+
activate-environment: openvino_sd_cpp
36+
python-version: "3.10"
37+
38+
- name: Install OpenVINO and other conda dependencies
2339
run: |
24-
set -e
25-
mkdir openvino
26-
curl https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.1.0-14645-e6dc0865128/l_openvino_toolkit_ubuntu20_2024.1.0.dev20240304_x86_64.tgz | tar --directory ./openvino/ --strip-components 1 -xz
27-
sudo ./openvino/install_dependencies/install_openvino_dependencies.sh
28-
- name: Download / convert models
40+
conda activate openvino_sd_cpp
41+
conda install -c conda-forge openvino c-compiler cxx-compiler make cmake
42+
conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
43+
44+
- name: Install python dependencies
45+
working-directory: ${{ env.working_directory }}
2946
run: |
30-
set -e
31-
source ./openvino/setupvars.sh
32-
cd ./image_generation/stable_diffusion_1_5/cpp/scripts/
33-
python -m pip install -U pip
34-
python -m pip install -r ./requirements.txt
35-
python -m pip install ../../../../thirdparty/openvino_tokenizers/
36-
python convert_model.py -sd runwayml/stable-diffusion-v1-5 -b 1 -t FP16 -dyn True
47+
conda activate openvino_sd_cpp
48+
python -m pip install -r requirements.txt
49+
python -m pip install ../../../thirdparty/openvino_tokenizers/[transformers]
50+
51+
- name: Download and convert model and tokenizer
52+
working-directory: ${{ env.working_directory }}
53+
run: |
54+
conda activate openvino_sd_cpp
55+
export MODEL_PATH="models/stable_diffusion_v1_5_ov/FP16"
56+
optimum-cli export openvino --model runwayml/stable-diffusion-v1-5 --task stable-diffusion --convert-tokenizer --weight-format fp16 $MODEL_PATH
57+
convert_tokenizer $MODEL_PATH/tokenizer/ --tokenizer-output-type i32 -o $MODEL_PATH/tokenizer/
58+
3759
- name: Build app
60+
working-directory: ${{ env.working_directory }}
3861
run: |
39-
set -e
40-
source ./openvino/setupvars.sh
41-
cd ./image_generation/stable_diffusion_1_5/cpp/
62+
conda activate openvino_sd_cpp
4263
cmake -DCMAKE_BUILD_TYPE=Release -S ./ -B ./build/
4364
cmake --build ./build/ --config Release --parallel
65+
4466
- name: Run app
67+
working-directory: ${{ env.working_directory }}
4568
run: |
46-
set -e
47-
source ./openvino/setupvars.sh
48-
cd ./image_generation/stable_diffusion_1_5/cpp/build
49-
./stable_diffusion -m ../scripts/runwayml/stable-diffusion-v1-5 -t FP16_dyn
69+
./build/stable_diffusion -m ./models/stable_diffusion_v1_5_ov -t FP16
70+
5071
stable_diffusion_1_5_cpp-windows:
5172
runs-on: windows-latest
5273
steps:
5374
- uses: actions/checkout@v4
5475
with:
5576
submodules: recursive
56-
- uses: actions/setup-python@v4
77+
78+
- name: Setup conda
79+
uses: conda-incubator/setup-miniconda@v3
5780
with:
58-
python-version: 3.8
59-
- name: Initialize OpenVINO
60-
shell: cmd
81+
miniconda-version: "latest"
82+
activate-environment: openvino_sd_cpp
83+
python-version: "3.10"
84+
85+
- name: Install OpenVINO and other conda dependencies
86+
run: |
87+
conda activate openvino_sd_cpp
88+
conda install -c conda-forge openvino c-compiler cxx-compiler make cmake
89+
90+
- name: Install python dependencies
91+
working-directory: ${{ env.working_directory }}
6192
run: |
62-
curl --output ov.zip https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.1.0-14645-e6dc0865128/w_openvino_toolkit_windows_2024.1.0.dev20240304_x86_64.zip
63-
unzip ov.zip
64-
- name: Download / convert a model / tokenizer
65-
shell: cmd
93+
conda activate openvino_sd_cpp
94+
python -m pip install -r requirements.txt
95+
python -m pip install ../../../thirdparty/openvino_tokenizers/[transformers]
96+
97+
- name: Download and convert model and tokenizer
98+
working-directory: ${{ env.working_directory }}
6699
run: |
67-
call w_openvino_toolkit_windows_2024.1.0.dev20240304_x86_64/setupvars.bat
68-
cd ./image_generation/stable_diffusion_1_5/cpp/scripts/
69-
python -m pip install -r ./requirements.txt
70-
python -m pip install ../../../../thirdparty/openvino_tokenizers/
71-
python convert_model.py -sd runwayml/stable-diffusion-v1-5 -b 1 -t FP16 -dyn True
100+
conda activate openvino_sd_cpp
101+
$env:MODEL_PATH='models/stable_diffusion_v1_5_ov/FP16'
102+
optimum-cli export openvino --model runwayml/stable-diffusion-v1-5 --task stable-diffusion --convert-tokenizer --weight-format fp16 $env:MODEL_PATH
103+
convert_tokenizer $env:MODEL_PATH/tokenizer/ --tokenizer-output-type i32 -o $env:MODEL_PATH/tokenizer/
104+
72105
- name: Build app
73-
shell: cmd
106+
working-directory: ${{ env.working_directory }}
74107
run: |
75-
call w_openvino_toolkit_windows_2024.1.0.dev20240304_x86_64/setupvars.bat
76-
cd ./image_generation/stable_diffusion_1_5/cpp/
108+
conda activate openvino_sd_cpp
77109
cmake -DCMAKE_BUILD_TYPE=Release -S ./ -B ./build/
78110
cmake --build ./build/ --config Release --parallel
111+
79112
- name: Run app
80-
shell: cmd
113+
working-directory: ${{ env.working_directory }}
81114
run: |
82-
call w_openvino_toolkit_windows_2024.1.0.dev20240304_x86_64/setupvars.bat
83-
cd ./image_generation/stable_diffusion_1_5/cpp/build/
84-
call "./Release/stable_diffusion.exe" -m ../scripts/runwayml/stable-diffusion-v1-5 -t FP16_dyn
115+
& "./build/Release/stable_diffusion.exe" -m ./models/stable_diffusion_v1_5_ov -t FP16 --dynamic
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
build
2+
images
3+
models

image_generation/stable_diffusion_1_5/cpp/README.md

+27-22
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,10 @@ The pure C++ text-to-image pipeline, driven by the OpenVINO native C++ API for S
66
77
## Step 1: Prepare build environment
88

9+
Prerequisites:
10+
- Conda ([installation guide](https://conda.io/projects/conda/en/latest/user-guide/install/index.html))
11+
12+
913
C++ Packages:
1014
* [CMake](https://cmake.org/download/): Cross-platform build tool
1115
* [OpenVINO](https://docs.openvino.ai/install): Model inference
@@ -14,7 +18,9 @@ Prepare a python environment and install dependencies:
1418
```shell
1519
conda create -n openvino_sd_cpp python==3.10
1620
conda activate openvino_sd_cpp
17-
conda install openvino c-compiler cxx-compiler make
21+
conda install -c conda-forge openvino c-compiler cxx-compiler make cmake
22+
# Ensure that Conda standard libraries are used
23+
conda env config vars set LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
1824
```
1925

2026
## Step 2: Convert Stable Diffusion v1.5 and Tokenizer models
@@ -24,32 +30,30 @@ conda install openvino c-compiler cxx-compiler make
2430
1. Install dependencies to import models from HuggingFace:
2531
```shell
2632
git submodule update --init
33+
# Reactivate Conda environment after installing dependencies and setting env vars
2734
conda activate openvino_sd_cpp
28-
python -m pip install -r scripts/requirements.txt
35+
python -m pip install -r requirements.txt
2936
python -m pip install ../../../thirdparty/openvino_tokenizers/[transformers]
3037
```
3138
2. Download a huggingface SD v1.5 model like:
3239
- [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)
3340
- [dreamlike-anime-1.0](https://huggingface.co/dreamlike-art/dreamlike-anime-1.0) to run Stable Diffusion with LoRA adapters.
3441

42+
Example command for downloading and exporting FP16 model:
43+
```shell
44+
export MODEL_PATH="models/dreamlike_anime_1_0_ov/FP16"
45+
# Using optimum-cli for exporting model to OpenVINO format
46+
optimum-cli export openvino --model dreamlike-art/dreamlike-anime-1.0 --task stable-diffusion --convert-tokenizer --weight-format fp16 $MODEL_PATH
47+
# Converting tokenizer manually (`--convert-tokenizer` flag of `optimum-cli` results in "OpenVINO Tokenizer export for CLIPTokenizer is not supported.")
48+
convert_tokenizer $MODEL_PATH/tokenizer/ --tokenizer-output-type i32 -o $MODEL_PATH/tokenizer/
49+
```
3550

36-
Example command:
37-
```shell
38-
huggingface-cli download --resume-download --local-dir-use-symlinks False dreamlike-art/dreamlike-anime-1.0 --local-dir models/dreamlike-anime-1.0
39-
```
51+
You can also choose another precision and export an FP32 or INT8 model.
4052

41-
Please, refer to the official website for [model downloading](https://huggingface.co/docs/hub/models-downloading) to read more details.
42-
43-
3. Run model conversion script to convert PyTorch model to OpenVINO IR via [optimum-intel](https://github.com/huggingface/optimum-intel). Please, use the script `scripts/convert_model.py` to convert the model into `FP16_static` or `FP16_dyn`, which will be saved into the `models` folder:
44-
```shell
45-
cd scripts
46-
python convert_model.py -b 1 -t FP16 -sd ../models/dreamlike-anime-1.0 # to convert to models with static shapes
47-
python convert_model.py -b 1 -t FP16 -sd ../models/dreamlike-anime-1.0 -dyn True # to keep models with dynamic shapes
48-
python convert_model.py -b 1 -t INT8 -sd ../models/dreamlike-anime-1.0 -dyn True # to compress the models to INT8
49-
```
53+
Please, refer to the official website for [🤗 Optimum](https://huggingface.co/docs/optimum/main/en/index) and [optimum-intel](https://github.com/huggingface/optimum-intel) to read more details.
5054

5155
> [!NOTE]
52-
>Now the pipeline support batch size = 1 only, i.e. static model `(1, 3, 512, 512)`
56+
> Now the pipeline supports batch size = 1 only, i.e. static model `(1, 3, 512, 512)`
5357
5458
### LoRA enabling with safetensors
5559

@@ -70,7 +74,7 @@ cmake --build build --parallel
7074

7175
## Step 4: Run Pipeline
7276
```shell
73-
./stable_diffusion [-p <posPrompt>] [-n <negPrompt>] [-s <seed>] [--height <output image>] [--width <output image>] [-d <device>] [-r <readNPLatent>] [-l <lora.safetensors>] [-a <alpha>] [-h <help>] [-m <modelPath>] [-t <modelType>]
77+
./build/stable_diffusion [-p <posPrompt>] [-n <negPrompt>] [-s <seed>] [--height <output image>] [--width <output image>] [-d <device>] [-r <readNPLatent>] [-l <lora.safetensors>] [-a <alpha>] [-h <help>] [-m <modelPath>] [-t <modelType>] [--dynamic]
7478

7579
Usage:
7680
stable_diffusion [OPTION...]
@@ -86,8 +90,9 @@ Usage:
8690
* `--width arg` Width of output image (default: 512)
8791
* `-c, --useCache` Use model caching
8892
* `-r, --readNPLatent` Read numpy generated latents from file
89-
* `-m, --modelPath arg` Specify path of SD model IR (default: ../models/dreamlike-anime-1.0)
90-
* `-t, --type arg` Specify the type of SD model IR (FP16_static or FP16_dyn) (default: FP16_static)
93+
* `-m, --modelPath arg` Specify path of SD model IR (default: ../models/dreamlike_anime_1_0_ov)
94+
* `-t, --type arg` Specify the type of SD model IRs (FP32, FP16 or INT8) (default: FP16)
95+
* `--dynamic` Specify the model input shape to use dynamic shape
9196
* `-l, --loraPath arg` Specify path of lora file. (*.safetensors). (default: )
9297
* `-a, --alpha arg` alpha for lora (default: 0.75)
9398
* `-h, --help` Print usage
@@ -103,15 +108,15 @@ Negative prompt: (empty, here couldn't use OV tokenizer, check the issues for de
103108

104109
Read the numpy latent instead of C++ std lib for the alignment with Python pipeline
105110

106-
* Generate image without lora `./stable_diffusion -r`
111+
* Generate image without lora `./build/stable_diffusion -r`
107112

108113
![](./without_lora.bmp)
109114

110-
* Generate image with soulcard lora `./stable_diffusion -r`
115+
* Generate image with soulcard lora `./build/stable_diffusion -r`
111116

112117
![](./soulcard_lora.bmp)
113118

114-
* Generate different size image with dynamic model (C++ lib generated latent): `./stable_diffusion -m ../models/dreamlike-anime-1.0 -t FP16_dyn --height 448 --width 704`
119+
* Generate different size image with dynamic model (C++ lib generated latent): `./build/stable_diffusion -m ./models/dreamlike_anime_1_0_ov -t FP16 --dynamic --height 448 --width 704`
115120

116121
![](./704x448.bmp)
117122

image_generation/stable_diffusion_1_5/cpp/scripts/convert_model.py

-46
This file was deleted.

0 commit comments

Comments
 (0)