@@ -982,28 +982,6 @@ def test_non_blocking_p2p(self):
         self.assertEqual(send_tensor, recv_tensor)
         dist.destroy_process_group()
 
-    @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
-    @parametrize("eager_init", [True, False])
-    def test_subgroup_p2p(self, eager_init: bool):
-        store = c10d.FileStore(self.file_name, self.world_size)
-        device = torch.device(f"cuda:{self.rank % torch.cuda.device_count()}")
-        c10d.init_process_group(
-            "nccl",
-            world_size=self.world_size,
-            rank=self.rank,
-            store=store,
-            device_id=device if eager_init else None,
-        )
-        send_tensor = torch.ones(10, 10, device=device)
-        group = dist.new_group()
-        if self.rank == 0:
-            dist.send(send_tensor, 1, group=group)
-        if self.rank == 1:
-            recv_tensor = torch.rand(10, 10, device=device)
-            dist.recv(recv_tensor, 0, group=group)
-        self.assertEqual(send_tensor, recv_tensor)
-        dist.destroy_process_group()
-
     @requires_nccl()
     @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
     def test_get_uid(self):