Commit 25bba07

GHA: update workflows (#1720)
- Align archives with the ones updated in PR 7998955
- Use macos-13 instead of macos-12, as the latter is no longer available
- Ubuntu 20.04 is also deprecated (see the migration commit in OV: openvinotoolkit/openvino@9e30651)
1 parent d297551 commit 25bba07

10 files changed (+33 -30 lines)

.github/workflows/bandit.yml (+1 -1)

@@ -7,7 +7,7 @@ on:
 permissions: read-all # Required by https://github.com/ossf/scorecard/blob/e23b8ad91fd6a64a0a971ca4fc0a4d1650725615/docs/checks.md#token-permissions
 jobs:
   bandit:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v4

.github/workflows/causal_lm_cpp.yml (+12 -12)

@@ -23,7 +23,7 @@ env:

 jobs:
   cpp-multinomial-greedy_causal_lm-ubuntu:
-    runs-on: ubuntu-20.04-8-cores
+    runs-on: ubuntu-22.04-8-cores
     defaults:
       run:
         shell: bash
@@ -82,7 +82,7 @@ jobs:
           ./build/samples/cpp/text_generation/beam_search_causal_lm,
           python ./samples/python/text_generation/beam_search_causal_lm.py,
         ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     defaults:
       run:
         shell: bash
@@ -298,7 +298,7 @@ jobs:
           && python samples\python\text_generation\lora.py .\TinyLlama\TinyLlama-1.1B-intermediate-step-1431k-3T\ adapter_model.safetensors "How to create a table with two columns, one of them has type float, another one has type int?"

   cpp-greedy_causal_lm-Qwen-7B-Chat:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -332,7 +332,7 @@ jobs:
           PYTHONPATH: "./build"

   cpp-beam_search_causal_lm-Qwen1_5-7B-Chat:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -367,7 +367,7 @@ jobs:
           PYTHONPATH: "./build"

   cpp-beam_search_causal_lm-Phi-2:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -402,7 +402,7 @@ jobs:
           PYTHONPATH: "./build"

   cpp-beam_search_causal_lm-notus-7b-v1:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -437,7 +437,7 @@ jobs:
           PYTHONPATH: "./build"

   cpp-speculative_decoding_lm-ubuntu:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -451,7 +451,7 @@ jobs:
       - name: Install OpenVINO
         run: |
           mkdir ./ov/
-          curl ${{ env.l_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
+          curl ${{ env.l_u22_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
           sudo ./ov/install_dependencies/install_openvino_dependencies.sh
       - name: Build app
         run: |
@@ -494,7 +494,7 @@ jobs:
           LD_LIBRARY_PATH: "./build/openvino_genai/:$LD_LIBRARY_PATH"

   cpp-prompt_lookup_decoding_lm-ubuntu:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -558,7 +558,7 @@ jobs:
           LD_LIBRARY_PATH: "./build/openvino_genai/:$LD_LIBRARY_PATH"

   cpp-Phi-1_5:
-    runs-on: ubuntu-20.04-16-cores
+    runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
         shell: bash
@@ -616,7 +616,7 @@ jobs:
           PYTHONPATH: "./build"

   cpp-greedy_causal_lm-redpajama-3b-chat:
-    runs-on: ubuntu-20.04-8-cores
+    runs-on: ubuntu-22.04-8-cores
     defaults:
       run:
         shell: bash
@@ -909,7 +909,7 @@ jobs:
         timeout-minutes: 4

   cpp-continuous-batching-ubuntu:
-    runs-on: ubuntu-20.04-8-cores
+    runs-on: ubuntu-22.04-8-cores
     defaults:
       run:
         shell: bash

.github/workflows/job_vlm_sample_llava.yml (+1 -1)

@@ -11,7 +11,7 @@ on:
         type: string

 env:
-  l_u22_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2025.1.0-17911-83c047443de/l_openvino_toolkit_ubuntu22_2025.1.0.dev20250116_x86_64.tgz
+  l_u22_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2025.1.0-18141-b0a120c9684/openvino_toolkit_ubuntu22_2025.1.0.dev20250211_x86_64.tgz

 jobs:
   visual_language_chat_sample-ubuntu-llava:

.github/workflows/lcm_dreamshaper_cpp.yml (+3 -3)

@@ -18,16 +18,16 @@ concurrency:

 env:
   PYTHON_VERSION: '3.9'
-  LINUX_OV_ARCHIVE_URL: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2025.1.0-17911-83c047443de/l_openvino_toolkit_ubuntu22_2025.1.0.dev20250116_x86_64.tgz
-  WINDOWS_OV_ARCHIVE_URL: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2025.1.0-17911-83c047443de/w_openvino_toolkit_windows_2025.1.0.dev20250116_x86_64.zip
+  LINUX_OV_ARCHIVE_URL: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2025.1.0-18141-b0a120c9684/openvino_toolkit_ubuntu22_2025.1.0.dev20250211_x86_64.tgz
+  WINDOWS_OV_ARCHIVE_URL: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2025.1.0-18141-b0a120c9684/openvino_toolkit_windows_2025.1.0.dev20250211_x86_64.zip
   OV_INSTALL_DIR: ${{ github.workspace }}/ov

 jobs:
   lcm_dreamshaper_v7_cpp-linux:
     runs-on: ubuntu-22.04
     defaults:
       run:
-        shell: bash -l {0}
+        shell: bash
     env:
       build_dir: ${{ github.workspace }}//build
     steps:

.github/workflows/mac.yml (+3 -3)

@@ -1,4 +1,4 @@
-name: macOS (12, Python 3.10)
+name: macOS (13, Python 3.10)
 on:
   workflow_dispatch:
   pull_request:
@@ -29,7 +29,7 @@ jobs:
     defaults:
       run:
         shell: bash
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     steps:
       - name: Download OpenVINO build
         id: openvino_download
@@ -58,7 +58,7 @@ jobs:
     defaults:
       run:
         shell: bash
-    runs-on: 'macos-12-large'
+    runs-on: 'macos-13-large'
     env:
       MACOSX_DEPLOYMENT_TARGET: '10.15'
       CMAKE_BUILD_TYPE: 'Release'

.github/workflows/stable_diffusion_1_5_cpp.yml (+1 -1)

@@ -78,7 +78,7 @@ jobs:
     needs: [ openvino_download_linux ]
     defaults:
       run:
-        shell: bash -l {0}
+        shell: bash
     env:
       build_dir: ${{ github.workspace }}//build
       SRC_DIR: ${{ github.workspace }}

.github/workflows/windows.yml (+1 -1)

@@ -29,7 +29,7 @@ jobs:
     defaults:
       run:
         shell: bash
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04

     steps:
       - name: Download OpenVINO build

src/python/openvino_genai/py_openvino_genai.pyi (+1 -1)

@@ -1535,7 +1535,7 @@ class StreamerBase:
        """
        Put is called every time new token is decoded. Returns a bool flag to indicate whether generation should be stopped, if return true generation stops
        """
-    def write(self, token: int) -> ...:
+    def write(self, token: int) -> StreamingStatus:
        """
        Write is called every time new token is decoded. Returns a StreamingStatus flag to indicate whether generation should be stopped or cancelled
        """

src/python/py_openvino_genai.cpp (+9 -6)

@@ -122,17 +122,20 @@ PYBIND11_MODULE(py_openvino_genai, m) {
         .def_readonly("scores", &EncodedResults::scores)
         .def_readonly("perf_metrics", &EncodedResults::perf_metrics);

-    py::class_<StreamerBase, ConstructableStreamer, std::shared_ptr<StreamerBase>>(m, "StreamerBase", streamer_base_docstring) // Change the holder form unique_ptr to shared_ptr
-        .def(py::init<>())
-        .def("put", &StreamerBase::put, "Put is called every time new token is decoded. Returns a bool flag to indicate whether generation should be stopped, if return true generation stops", py::arg("token"))
-        .def("write", &StreamerBase::write, "Write is called every time new token is decoded. Returns a StreamingStatus flag to indicate whether generation should be stopped or cancelled", py::arg("token"))
-        .def("end", &StreamerBase::end, "End is called at the end of generation. It can be used to flush cache if your own streamer has one");
-
     py::enum_<ov::genai::StreamingStatus>(m, "StreamingStatus")
         .value("RUNNING", ov::genai::StreamingStatus::RUNNING)
         .value("CANCEL", ov::genai::StreamingStatus::CANCEL)
         .value("STOP", ov::genai::StreamingStatus::STOP);

+    auto streamer = py::class_<StreamerBase, ConstructableStreamer, std::shared_ptr<StreamerBase>>(m, "StreamerBase", streamer_base_docstring) // Change the holder form unique_ptr to shared_ptr
+        .def(py::init<>())
+        .def("write", &StreamerBase::write, "Write is called every time new token is decoded. Returns a StreamingStatus flag to indicate whether generation should be stopped or cancelled", py::arg("token"))
+        .def("end", &StreamerBase::end, "End is called at the end of generation. It can be used to flush cache if your own streamer has one");
+
+    OPENVINO_SUPPRESS_DEPRECATED_START
+    streamer.def("put", &StreamerBase::put, "Put is called every time new token is decoded. Returns a bool flag to indicate whether generation should be stopped, if return true generation stops", py::arg("token"));
+    OPENVINO_SUPPRESS_DEPRECATED_END
+
     init_tokenizer(m);
     init_lora_adapter(m);
     init_generation_config(m);
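
Two things change in the module init: the StreamingStatus enum is now registered before StreamerBase, whose write binding refers to it, and the py::class_ object is kept in a local streamer variable so the deprecated put binding can be attached separately inside OPENVINO_SUPPRESS_DEPRECATED_START/END without triggering deprecation warnings at build time. For Python callers this means a legacy streamer that only overrides put should still run; a hedged sketch, with the model path and prompt as placeholders:

import openvino_genai as ov_genai

class LegacyStreamer(ov_genai.StreamerBase):
    """Old-style streamer: put() returns a bool, where True stops generation."""

    def put(self, token: int) -> bool:
        print(token)
        return False  # keep generating

    def end(self) -> None:
        pass

# Placeholder model directory; any exported OpenVINO GenAI LLM works here.
pipe = ov_genai.LLMPipeline("./TinyLlama-1.1B-Chat-v1.0", "CPU")
pipe.generate("Why is the sky blue?", max_new_tokens=30, streamer=LegacyStreamer())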

tests/python_tests/test_llm_pipeline.py (+1 -1)

@@ -172,7 +172,7 @@ def test_chat_scenario_several_chats_in_series():

     generation_config_kwargs, _ = chat_intpus[0]
     ov_generation_config = GenerationConfig(**generation_config_kwargs)
-    hf_generation_config = convert_to_hf(opt_model.generation_config, ov_generation_config)
+    hf_generation_config = generation_config_to_hf(opt_model.generation_config, ov_generation_config)

     for i in range(2):
         chat_history_hf = []
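
The test switches to the renamed helper. For orientation only, here is a minimal sketch of what a converter with this call shape plausibly does; the real generation_config_to_hf lives in the repo's test utilities and covers far more fields, so everything below is an assumption inferred from the call site:

from transformers import GenerationConfig as HFGenerationConfig

def generation_config_to_hf(default_generation_config, ov_generation_config):
    # Start from the model's HF defaults so eos/pad token ids carry over.
    kwargs = default_generation_config.to_dict()
    # Map a few openvino_genai.GenerationConfig fields onto their HF names;
    # the selection here is illustrative, not the full mapping.
    kwargs.update(
        max_new_tokens=ov_generation_config.max_new_tokens,
        do_sample=ov_generation_config.do_sample,
        num_beams=ov_generation_config.num_beams,
    )
    return HFGenerationConfig(**kwargs)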
