Skip to content

Commit

Permalink
Merge pull request #3090 from nest/fix_single2master
Browse files · Browse the repository at this point in the history
Change MPI communication to specific thread
  • Loading branch information
heplesser authored Feb 6, 2024
2 parents b8b2347 + 8f3c303 commit 8af12af
Showing 1 changed file with 17 additions and 12 deletions.
29 changes: 17 additions & 12 deletions nestkernel/event_delivery_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -804,13 +804,14 @@ EventDeliveryManager::gather_target_data( const size_t tid )
// otherwise
gather_completed_checker_[ tid ].set_true();

#pragma omp single
#pragma omp master
{
if ( kernel().mpi_manager.adaptive_target_buffers() and buffer_size_target_data_has_changed_ )
{
resize_send_recv_buffers_target_data();
}
} // of omp single; implicit barrier
} // of omp master; (no barrier)
#pragma omp barrier

kernel().connection_manager.restore_source_table_entry_point( tid );

Expand All @@ -828,7 +829,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
#pragma omp barrier
kernel().connection_manager.clean_source_table( tid );

#pragma omp single
#pragma omp master
{
#ifdef TIMER_DETAILED
sw_communicate_target_data_.start();
Expand All @@ -837,19 +838,20 @@ EventDeliveryManager::gather_target_data( const size_t tid )
#ifdef TIMER_DETAILED
sw_communicate_target_data_.stop();
#endif
} // of omp single (implicit barrier)

} // of omp master (no barriers!)
#pragma omp barrier

const bool distribute_completed = distribute_target_data_buffers_( tid );
gather_completed_checker_[ tid ].logical_and( distribute_completed );

// resize mpi buffers, if necessary and allowed
if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() )
{
#pragma omp single
#pragma omp master
{
buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data();
}
#pragma omp barrier
}
} // of while

Expand All @@ -874,13 +876,14 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
// assume this is the last gather round and change to false otherwise
gather_completed_checker_[ tid ].set_true();

#pragma omp single
#pragma omp master
{
if ( kernel().mpi_manager.adaptive_target_buffers() and buffer_size_target_data_has_changed_ )
{
resize_send_recv_buffers_target_data();
}
} // of omp single; implicit barrier
} // of omp master; no barrier
#pragma omp barrier

TargetSendBufferPosition send_buffer_position(
assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() );
Expand All @@ -897,7 +900,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )

#pragma omp barrier

#pragma omp single
#pragma omp master
{
#ifdef TIMER_DETAILED
sw_communicate_target_data_.start();
Expand All @@ -906,7 +909,8 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
#ifdef TIMER_DETAILED
sw_communicate_target_data_.stop();
#endif
} // of omp single (implicit barrier)
} // of omp master (no barrier)
#pragma omp barrier

// Up to here, gather_completed_checker_ just has local info: has this thread been able to write
// all data it is responsible for to buffers. Now combine with information on whether other ranks
Expand All @@ -917,10 +921,11 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
// resize mpi buffers, if necessary and allowed
if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() )
{
#pragma omp single
#pragma omp master
{
buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data();
}
} // of omp master (no barrier)
#pragma omp barrier
}

} // of while
Expand Down

0 comments on commit 8af12af

Please sign in to comment.