@@ -506,6 +506,7 @@ def test_flip_eye_crash(self):
     helper_test_op([], lambda: (torch.eye(10)@torch.eye(10).flip(0)),
                        lambda: (Tensor.eye(10)@Tensor.eye(10).flip(0)), forward_only=True)
 
+  @unittest.skipIf(Device.DEFAULT == "WEBGPU", "this test uses more than 8 bufs passing the WEBGPU limit") #TODO: remove after #1461
   def test_broadcast_full(self):
     for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
                                   (torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
@@ -517,6 +518,7 @@ def test_broadcast_simple(self):
     helper_test_op([(45,65), (45,1)], lambda x,y: x/y, lambda x,y: x/y)
     helper_test_op([(45,65), ()], lambda x,y: x/y, lambda x,y: x/y)
 
+  @unittest.skipIf(Device.DEFAULT == "WEBGPU", "this test uses more than 8 bufs passing the WEBGPU limit") #TODO: remove after #1461
   def test_broadcast_partial(self):
     for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
                                   (torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
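For context, the decorator added in both hunks is plain unittest machinery: it skips the test at collection time whenever the default device is WEBGPU. Below is a minimal, self-contained sketch of the same pattern; the test class, test body, and import path for Device are assumptions for illustration (Device's import location has moved between tinygrad versions), not the project's canonical layout.

import unittest
from tinygrad.tensor import Tensor
from tinygrad.ops import Device  # assumption: Device's import path differs across tinygrad versions

class TestWebGPUSkipSketch(unittest.TestCase):  # hypothetical class, for illustration only
  # Skip on WEBGPU, mirroring the decorator added in the diff above.
  @unittest.skipIf(Device.DEFAULT == "WEBGPU", "uses more than 8 bufs passing the WEBGPU limit")
  def test_broadcast_uses_many_buffers(self):
    a, b = Tensor.rand(45, 65), Tensor.rand(45, 1)
    self.assertEqual((a + b).shape, (45, 65))

if __name__ == "__main__":
  unittest.main()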