Skip to content

Commit 14fdda7

Browse files
authored
Merge branch 'main' into chao/avg
2 parents 07c5eb7 + 4aca477 commit 14fdda7

File tree

3 files changed

+5
-18
lines changed

3 files changed

+5
-18
lines changed

.github/workflows/_linux_build.yml

+2-1
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,8 @@ jobs:
120120
echo "Wheel build successful, update last commit in the issue https://github.com/intel/torch-xpu-ops/issues/1280"
121121
gh --repo $repo issue view $commit_issue --json body -q .body | sed "s;${last_commit};${current_commit};g" | sed '/^$/d' > new_body.txt
122122
gh --repo $repo issue edit $commit_issue --body-file new_body.txt
123-
else
123+
fi
124+
if [ ! -f dist/torch*.whl ]; then
124125
echo "Wheel build failed, use last commit in the issue https://github.com/intel/torch-xpu-ops/issues/1280"
125126
gh --repo $repo issue comment $commit_issue -b "Wheel build failed with commit [${current_commit}](https://github.com/pytorch/pytorch/tree/${current_commit}), refer ${build_url}. CC @intel/torch-xpu-ops-maintain @EikanWang @riverliuintel @fengyuan14 @xytintel @etaf @chuanqi129 @mengfei25"
126127
git clean -df .

test/regressions/test_torchvision_roi_ops.py

-17
Original file line numberDiff line numberDiff line change
@@ -471,7 +471,6 @@ def expected_fn(
471471
) # , ids=str)
472472
@pytest.mark.parametrize("contiguous", (True, False))
473473
@pytest.mark.parametrize("deterministic", (True, False))
474-
@pytest.mark.opcheck_only_one
475474
def test_forward(
476475
self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None
477476
):
@@ -486,22 +485,6 @@ def test_forward(
486485
aligned=aligned,
487486
)
488487

489-
@pytest.mark.parametrize("aligned", (True, False))
490-
@pytest.mark.parametrize("deterministic", (True, False))
491-
@pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
492-
@pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
493-
@pytest.mark.opcheck_only_one
494-
def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
495-
with torch.amp.autocast("xpu"):
496-
self.test_forward(
497-
torch.device("xpu"),
498-
contiguous=False,
499-
deterministic=deterministic,
500-
aligned=aligned,
501-
x_dtype=x_dtype,
502-
rois_dtype=rois_dtype,
503-
)
504-
505488

506489
class TestPSRoIAlign(RoIOpTester):
507490
mps_backward_atol = 5e-2

test/xpu/skip_list_common.py

+3
Original file line numberDiff line numberDiff line change
@@ -1421,6 +1421,9 @@
14211421
"test_numeric_check_leak_tunableop_rocm_xpu_float32",
14221422
"test_dump_results_on_exit_tunableop_xpu_float32",
14231423
"test_rotating_buffer_tunableop_xpu_float32",
1424+
"test_gemm_bias_tunableop_xpu_bfloat16",
1425+
"test_scaled_gemm_tunableop_xpu_float8_e4m3fnuz",
1426+
"test_scaled_gemm_tunableop_xpu_float8_e5m2fnuz",
14241427
# CUDA bias cases added in latest PyTorch
14251428
# AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable'
14261429
"test_matmul_check_entries_tunableop_xpu_float16",

0 commit comments

Comments (0)