Commit c512a8b
added SD for examples
1 parent cd211bb

1 file changed (+5, -15 lines)

.github/workflows/test_openvino_examples.yml
@@ -1,20 +1,16 @@
-name: OpenVINO - Notebooks and Examples Test
+name: OpenVINO - Examples Test
 
 on:
   workflow_dispatch:
   schedule:
     - cron: '14 3 * * 1' # run weekly: every Monday at 3:14
   push:
     paths:
-      - '.github/workflows/test_openvino_notebooks.yml'
       - '.github/workflows/test_openvino_examples.yml'
-      - 'notebooks/openvino/*'
       - 'examples/openvino/*'
   pull_request:
     paths:
-      - '.github/workflows/test_openvino_notebooks.yml'
       - '.github/workflows/test_openvino_examples.yml'
-      - 'notebooks/openvino/*'
       - 'examples/openvino/*'
 
 
@@ -42,11 +38,8 @@ jobs:
       run: |
         # Install PyTorch CPU to prevent unnecessary downloading/installing of CUDA packages
         # ffmpeg, torchaudio and pillow are required for image classification and audio classification pipelines
-        sudo apt-get install ffmpeg
-        # pip install torch torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
-        pip install ".[tests, openvino]" nbval
-        pip install optimum[openvino] nncf torchaudio datasets==2.4.0
-        pip install -r notebooks/openvino/requirements.txt
+        pip install optimum[openvino] nncf
+        pip install -r examples/openvino/audio-classification/requirements.txt
         pip install -r examples/openvino/image-classification/requirements.txt
         pip install -r examples/openvino/question-answering/requirements.txt
         pip install -r examples/openvino/text-classification/requirements.txt
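
Read without the diff markers, the dependency-install block after this hunk reduces to the following commands; a sketch assuming, as in the workflow, that they run from the repository root with pip on the PATH:

pip install optimum[openvino] nncf
pip install -r examples/openvino/audio-classification/requirements.txt
pip install -r examples/openvino/image-classification/requirements.txt
pip install -r examples/openvino/question-answering/requirements.txt
pip install -r examples/openvino/text-classification/requirements.txt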
@@ -55,13 +48,10 @@ jobs:
     - run: lscpu
     - run: pip freeze
 
-    - name: Test with Pytest
-      run: |
-        python -m pytest --nbval-lax notebooks/openvino/optimum_openvino_inference.ipynb notebooks/openvino/question_answering_quantization.ipynb
-
     - name: Test examples
       run: |
-        #python examples/openvino/audio-classification/run_audio_classification.py --model_name_or_path facebook/wav2vec2-base --nncf_compression_config examples/openvino/audio-classification/configs/wav2vec2-base-qat.json --dataset_name superb --dataset_config_name ks --max_train_samples 10 --max_eval_samples 2 --output_dir /tmp/qat-wav2vec2-base-ft-keyword-spotting --overwrite_output_dir --remove_unused_columns False --do_train --learning_rate 3e-5 --max_length_seconds 1 --attention_mask False --warmup_ratio 0.1 --num_train_epochs 1 --gradient_accumulation_steps 1 --dataloader_num_workers 1 --logging_strategy steps --logging_steps 1 --evaluation_strategy epoch --save_strategy epoch --load_best_model_at_end False --seed 42
+        python examples/openvino/audio-classification/run_audio_classification.py --model_name_or_path facebook/wav2vec2-base --nncf_compression_config examples/openvino/audio-classification/configs/wav2vec2-base-qat.json --dataset_name superb --dataset_config_name ks --max_train_samples 10 --max_eval_samples 2 --output_dir /tmp/qat-wav2vec2-base-ft-keyword-spotting --overwrite_output_dir --remove_unused_columns False --do_train --learning_rate 3e-5 --max_length_seconds 1 --attention_mask False --warmup_ratio 0.1 --num_train_epochs 1 --gradient_accumulation_steps 1 --dataloader_num_workers 1 --logging_strategy steps --logging_steps 1 --evaluation_strategy epoch --save_strategy epoch --load_best_model_at_end False --seed 42
         TASK_NAME=sst2 && python examples/openvino/text-classification/run_glue.py --model_name_or_path sshleifer/tiny-distilbert-base-cased-distilled-squad --task_name $TASK_NAME --max_train_samples 10 --max_eval_samples 2 --output_dir /tmp/qat-bert-base-ft-$TASK_NAME --overwrite_output_dir --do_train --do_eval --max_seq_length 128 --learning_rate 1e-5 --optim adamw_torch --num_train_epochs 1 --logging_steps 10 --evaluation_strategy steps --eval_steps 5 --save_strategy epoch --seed 42
         python examples/openvino/question-answering/run_qa.py --model_name_or_path sshleifer/tiny-distilbert-base-cased-distilled-squad --dataset_name squad --do_train --do_eval --max_train_samples 10 --max_eval_samples 2 --learning_rate 3e-5 --num_train_epochs 1 --max_seq_length 384 --doc_stride 128 --output_dir /tmp/outputs_squad/ --overwrite_output_dir
         python examples/openvino/image-classification/run_image_classification.py --model_name_or_path nateraw/vit-base-beans --dataset_name beans --max_train_samples 10 --max_eval_samples 2 --remove_unused_columns False --do_train --learning_rate 2e-5 --num_train_epochs 1 --logging_strategy steps --logging_steps 1 --evaluation_strategy epoch --save_strategy epoch --save_total_limit 1 --seed 1337 --output_dir /tmp/beans_outputs/
+        pip install -r examples/openvino/stable-diffusion/requirements.txt && python examples/openvino/stable-diffusion/train_text_to_image_qat.py --ema_device="cpu" --use_kd --model_id="svjack/Stable-Diffusion-Pokemon-en" --max_train_samples 10 --center_crop --random_flip --dataloader_num_workers=2 --dataset_name="lambdalabs/pokemon-blip-captions" --max_train_steps=1 --output_dir=sd-quantized-pokemon
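
The stable-diffusion command added above (the point of this commit) sits on a single long line in the workflow; split across lines for readability, with every flag and value taken verbatim from the diff and only the line breaks added, the equivalent invocation from the repository root would be:

pip install -r examples/openvino/stable-diffusion/requirements.txt
python examples/openvino/stable-diffusion/train_text_to_image_qat.py \
  --ema_device="cpu" \
  --use_kd \
  --model_id="svjack/Stable-Diffusion-Pokemon-en" \
  --max_train_samples 10 \
  --center_crop \
  --random_flip \
  --dataloader_num_workers=2 \
  --dataset_name="lambdalabs/pokemon-blip-captions" \
  --max_train_steps=1 \
  --output_dir=sd-quantized-pokemon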
