image-generation.tsx
import { ExploreCodeSamples } from '@site/src/components/GoToLink/explore-code-samples';
import { GoToDocumentation } from '@site/src/components/GoToLink/go-to-documentation';
import { LanguageTabs, TabItemCpp, TabItemPython } from '@site/src/components/LanguageTabs';
import { Section } from '@site/src/components/Section';
import CodeBlock from '@theme/CodeBlock';
import { SectionImage } from './Section/section-image';
import ImagePlaceholder from '@site/static/img/image-generation-placeholder.webp';
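
// Feature bullets rendered next to the code samples in the right-hand column.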
const FEATURES = [
  'Alter parameters (width, height, iterations) and compile the model for a static size',
  'Load LoRA adapters (in safetensors format) and dynamically switch between them',
  'Generate multiple images per request',
];
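
// Python sample rendered in the Python tab: a minimal Text2ImagePipeline run that
// generates a 512x512 image from a command-line prompt and saves it as a BMP file.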
const pythonCodeBlock = (
  <CodeBlock language="python">
    {`import argparse

from PIL import Image
import openvino_genai


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_dir')
    parser.add_argument('prompt')
    args = parser.parse_args()

    device = 'CPU'  # GPU, NPU can be used as well
    pipe = openvino_genai.Text2ImagePipeline(args.model_dir, device)

    image_tensor = pipe.generate(
        args.prompt,
        width=512,
        height=512,
        num_inference_steps=20
    )

    image = Image.fromarray(image_tensor.data[0])
    image.save("image.bmp")


if __name__ == '__main__':
    main()`}
  </CodeBlock>
);
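
// Equivalent C++ sample rendered in the C++ tab; imwrite.hpp provides the
// BMP-writing helper used to save the generated image tensor.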
const cppCodeBlock = (
  <CodeBlock language="cpp">
    {`#include "openvino/genai/image_generation/text2image_pipeline.hpp"
#include "imwrite.hpp"

int main(int argc, char* argv[]) {
    const std::string models_path = argv[1], prompt = argv[2];
    const std::string device = "CPU";  // GPU, NPU can be used as well

    ov::genai::Text2ImagePipeline pipe(models_path, device);
    ov::Tensor image = pipe.generate(prompt,
        ov::genai::width(512),
        ov::genai::height(512),
        ov::genai::num_inference_steps(20));

    imwrite("image.bmp", image, true);
}`}
  </CodeBlock>
);
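
// Landing-page section: description and illustration on the left; feature list,
// language-tabbed code samples, and documentation links on the right.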
export const ImageGeneration = () => {
  return (
    <Section.Container>
      <Section.Column>
        <Section.Title>Image generation API</Section.Title>
        <Section.Description>
          A user-friendly image generation API can be used with generative models to improve
          creative tools and increase productivity. For instance, it can be utilized in furniture
          design tools to create various design concepts.
        </Section.Description>
        <SectionImage url={ImagePlaceholder} alt={'Image generation API'} />
      </Section.Column>
      <Section.Column>
        <Section.Features features={FEATURES} />
        <hr />
        <LanguageTabs>
          <TabItemPython>{pythonCodeBlock}</TabItemPython>
          <TabItemCpp>{cppCodeBlock}</TabItemCpp>
        </LanguageTabs>
        <hr />
        <ExploreCodeSamples link={'docs/category/samples'} />
        <GoToDocumentation link={'docs/how-to-guides/image-generation'} />
      </Section.Column>
    </Section.Container>
  );
};