forked from huggingface/optimum-intel
-
Notifications
You must be signed in to change notification settings - Fork 0
61 lines (50 loc) · 4.05 KB
/
test_openvino_notebooks.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
---
# CI for the OpenVINO notebooks: execute the notebooks under nbval and
# smoke-test the OpenVINO example scripts across a small Python matrix.
# Triggers: manual, weekly cron, and any change touching this workflow or
# the notebooks themselves.
name: OpenVINO - Notebooks Test

on:
  workflow_dispatch:
  schedule:
    - cron: '14 3 * * 1' # run weekly: every Monday at 3:14
  push:
    paths:
      - '.github/workflows/test_openvino_notebooks.yml'
      - 'notebooks/openvino/*'
  pull_request:
    paths:
      - '.github/workflows/test_openvino_notebooks.yml'
      - 'notebooks/openvino/*'

# Cancel superseded in-flight runs for the same branch/PR to save runner time.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    strategy:
      fail-fast: false # let both Python versions run to completion even if one fails
      matrix:
        python-version: ["3.8", "3.10"]
    runs-on: ubuntu-20.04
    steps:
      # checkout/setup-python v2 run on the deprecated Node 12 runtime;
      # v4 is the supported release with an unchanged input interface.
      - uses: actions/checkout@v4
      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          # Install PyTorch CPU to prevent unnecessary downloading/installing of CUDA packages
          # ffmpeg, torchaudio and pillow are required for image classification and audio classification pipelines
          # `apt-get update` refreshes the package index; `-y` is required because the
          # runner is non-interactive and a confirmation prompt would abort the step.
          sudo apt-get update && sudo apt-get install -y ffmpeg
          pip install torch torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
          pip install ".[tests, openvino]" nbval
          pip install -r notebooks/openvino/requirements.txt
      # Diagnostics for debugging CI-only failures: memory, CPU topology, resolved packages.
      - run: free -h
      - run: lscpu
      - run: pip freeze
      - name: Test with Pytest
        run: |
          python -m pytest --nbval-lax notebooks/openvino/optimum_openvino_inference.ipynb notebooks/openvino/question_answering_quantization.ipynb
      - name: Test examples
        # NOTE(review): these scripts are invoked with no working-directory/`cd`,
        # so they must be reachable from the repository root — in optimum-intel
        # they normally live under examples/openvino/*. Confirm the intended
        # location before relying on this step.
        run: |
          pip install datasets==2.4.0 && python run_image_classification.py --model_name_or_path nateraw/vit-base-beans --dataset_name beans --max_train_samples 50 --max_eval_samples 10 --remove_unused_columns False --do_train --do_eval --learning_rate 2e-5 --num_train_epochs 1 --logging_strategy steps --logging_steps 10 --evaluation_strategy epoch --save_strategy epoch --save_total_limit 3 --seed 1337 --output_dir /tmp/beans_outputs/
          TASK_NAME=sst2 && python run_glue.py --model_name_or_path bert-base-uncased --task_name $TASK_NAME --max_train_samples 50 --max_eval_samples 10 --output_dir /tmp/qat-bert-base-ft-$TASK_NAME --overwrite_output_dir --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 32 --learning_rate 1e-5 --optim adamw_torch --num_train_epochs 3 --logging_steps 10 --evaluation_strategy steps --eval_steps 250 --save_strategy epoch --seed 42
          python run_audio_classification.py --model_name_or_path facebook/wav2vec2-base --nncf_compression_config configs/wav2vec2-base-qat.json --dataset_name superb --dataset_config_name ks --max_train_samples 50 --max_eval_samples 10 --output_dir /tmp/qat-wav2vec2-base-ft-keyword-spotting --overwrite_output_dir --remove_unused_columns False --do_train --do_eval --learning_rate 3e-5 --max_length_seconds 1 --attention_mask False --warmup_ratio 0.1 --num_train_epochs 5 --per_device_train_batch_size 32 --gradient_accumulation_steps 4 --per_device_eval_batch_size 64 --dataloader_num_workers 4 --logging_strategy steps --logging_steps 10 --evaluation_strategy epoch --save_strategy epoch --load_best_model_at_end True --metric_for_best_model accuracy --save_total_limit 3 --seed 42
          python train_text_to_image_qat.py --ema_device="cpu" --use_kd --model_id="svjack/Stable-Diffusion-Pokemon-en" --max_train_samples 20 --center_crop --random_flip --dataloader_num_workers=2 --dataset_name="lambdalabs/pokemon-blip-captions" --max_train_steps=1 --output_dir=sd-quantized-pokemon
          python run_qa.py --model_name_or_path distilbert-base-uncased-distilled-squad --dataset_name squad --do_train --do_eval --per_device_train_batch_size 8 --per_device_eval_batch_size 8 --max_train_samples 50 --learning_rate 3e-5 --num_train_epochs 1 --max_seq_length 384 --doc_stride 128 --output_dir /tmp/outputs_squad/