# GitHub Actions workflow: run the ONNX Runtime GPU Python test suite
# on a self-hosted GPU runner (AWS g6.4xlarge) inside an NVIDIA TensorRT container.
name: ONNX Runtime GPU / Python - Test

on:
  # Manual trigger from the Actions tab.
  workflow_dispatch:
  # Nightly run — every day at 7am UTC.
  schedule:
    - cron: "0 7 * * *"
  # PR trigger; `labeled`/`unlabeled` are included so adding or removing the
  # `gpu` / `onnxruntime-gpu` label re-evaluates the job-level `if:` below.
  pull_request:
    branches:
      - main
    types:
      - opened
      - labeled
      - reopened
      - unlabeled
      - synchronize

# Cancel superseded runs of the same PR branch; `run_id` fallback keeps
# scheduled/dispatched runs (which have no head_ref) from sharing a group.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    # Run on schedule and manual dispatch unconditionally; on PRs only when a
    # GPU label is present. NOTE: `schedule` must be listed explicitly —
    # scheduled events have no PR labels, so without it nightly runs are
    # silently skipped. The `push` clause is kept for a possible future
    # `push:` trigger on main; it is currently never true.
    if: ${{
      (github.event_name == 'push') ||
      (github.event_name == 'schedule') ||
      (github.event_name == 'workflow_dispatch') ||
      contains(github.event.pull_request.labels.*.name, 'gpu') ||
      contains(github.event.pull_request.labels.*.name, 'onnxruntime-gpu')
      }}

    runs-on:
      group: aws-g6-4xlarge-plus

    # TensorRT base image provides CUDA + TensorRT libs for the trt_ep tests;
    # `--gpus all` exposes the host GPUs to the container.
    container:
      image: nvcr.io/nvidia/tensorrt:24.12-py3
      options: --gpus all

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.9"

      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
          pip install .[tests,onnxruntime-gpu,diffusers]

      - name: Test with pytest
        # Only the CUDA / TensorRT execution-provider tests are selected;
        # `-n auto` parallelizes via pytest-xdist.
        run: |
          pytest tests/onnxruntime -m "cuda_ep_test or trt_ep_test" --durations=0 -vvvv -n auto