# See the License for the specific language governing permissions and
# limitations under the License.

inference-optimization-inc-ipex-quantization-notebook-${PYTHON_VERSION:-3.9}-cpu:
  cmd: papermill --log-output jupyter/inc-ipex-quantization/quantize_with_inc.ipynb result.ipynb -k pytorch-cpu --cwd jupyter/inc-ipex-quantization
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
  notebook: True
inference-optimization-inc-ipex-quantization-notebook-${PYTHON_VERSION:-3.9}-gpu:
  cmd: papermill --log-output jupyter/inc-ipex-quantization/quantize_with_inc.ipynb result.ipynb -k pytorch-gpu --cwd jupyter/inc-ipex-quantization
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
  notebook: True

inference-optimization-inc-itex-notebook-${PYTHON_VERSION:-3.9}-cpu:
  cmd: papermill --log-output jupyter/inc-itex/inc_sample_tensorflow.ipynb result.ipynb -k tensorflow-cpu --cwd jupyter/inc-itex
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
  notebook: True

# Status: commented out due to an out-of-resources error
# inference-optimization-inc-itex-notebook-${PYTHON_VERSION:-3.9}-gpu:
#   cmd: papermill --log-output jupyter/inc-itex/inc_sample_tensorflow.ipynb result.ipynb -k tensorflow-gpu --cwd jupyter/inc-itex
-#   img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+#   img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
#   notebook: True

inference-optimization-inc-tensorflow-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n tensorflow-cpu sample-tests/neural_compressor/tensorflow/run.sh cpu
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
inference-optimization-inc-tensorflow-${PYTHON_VERSION:-3.9}-gpu:
  cmd: conda run -n tensorflow-gpu sample-tests/neural_compressor/tensorflow/run.sh gpu
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}

inference-optimization-inc-torch-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n pytorch-cpu sample-tests/neural_compressor/torch/run.sh cpu
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}

inference-optimization-ipex-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n pytorch-cpu python -W ignore sample-tests/intel_extension_for_pytorch/test_ipex.py --device cpu --ipex
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
inference-optimization-ipex-${PYTHON_VERSION:-3.9}-gpu:
  cmd: conda run -n pytorch-gpu python -W ignore sample-tests/intel_extension_for_pytorch/test_ipex.py --device xpu --ipex
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}

inference-optimization-itex-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n tensorflow-cpu python -W ignore sample-tests/intel_extension_for_tensorflow/test_itex.py
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
inference-optimization-itex-${PYTHON_VERSION:-3.9}-gpu:
  cmd: conda run -n tensorflow-gpu python -W ignore sample-tests/intel_extension_for_tensorflow/test_itex.py
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}

inference-optimization-itex-inference-notebook-${PYTHON_VERSION:-3.9}-cpu:
  cmd: papermill --log-output jupyter/itex-inference/tutorial_optimize_TensorFlow_pretrained_model.ipynb result.ipynb -k tensorflow-cpu --cwd jupyter/itex-inference
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
  notebook: True

# Needs an update from TensorFlow v1 to v2
# inference-optimization-itex-inference-notebook-${PYTHON_VERSION:-3.9}-gpu:
#   cmd: papermill --log-output jupyter/itex-inference/tutorial_optimize_TensorFlow_pretrained_model.ipynb result.ipynb -k tensorflow-gpu --cwd jupyter/itex-inference
-#   img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+#   img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
#   notebook: True

inference-optimization-onnx-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n tensorflow-cpu sample-tests/onnx/run.sh
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
inference-optimization-onnx-${PYTHON_VERSION:-3.9}-gpu:
  cmd: conda run -n tensorflow-gpu sample-tests/onnx/run.sh
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}

inference-optimization-tensorflow-dataset-librarian-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n tensorflow-cpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco'
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
inference-optimization-tensorflow-dataset-librarian-${PYTHON_VERSION:-3.9}-gpu:
  cmd: conda run -n tensorflow-gpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco'

inference-optimization-torch-dataset-librarian-${PYTHON_VERSION:-3.9}-cpu:
  cmd: conda run -n pytorch-cpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco'
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
inference-optimization-torch-dataset-librarian-${PYTHON_VERSION:-3.9}-gpu:
  cmd: conda run -n pytorch-gpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco'
-  img: ${REGISTRY}/aiops/mlops-ci:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
+  img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}
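
# For reference, a minimal sketch of how the parameterized img field resolves under
# shell-style ${VAR:-default} expansion. The REGISTRY and REPO values below are
# hypothetical placeholders; in CI they are supplied by the workflow environment
# instead of the previously hard-coded aiops/mlops-ci repository path.
#
#   export REGISTRY=ghcr.io/example
#   export REPO=ai-containers
#   export GITHUB_RUN_NUMBER=123
#   echo "${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9}"
#   # -> ghcr.io/example/ai-containers:b-123-inference-optimization-2024.1.0-py3.9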