From b858e36c24dab324f4fd951b62540ec76843ea76 Mon Sep 17 00:00:00 2001
From: David Grote
Date: Mon, 3 Mar 2025 16:51:46 -0800
Subject: [PATCH] Synchronize velocity for diagnostics (#1751)

This PR allows the particle velocities and positions to be synchronized in
time when generating diagnostics. Without this option, the particle
velocities will lag behind the positions by a half time step.

This adds the boolean input parameter
`warpx.synchronize_velocity_for_diagnostics` to turn on this option,
defaulting to false.

There are several pieces to this PR:
- Changes to `MultiDiagnostic` and `MultiReducedDiags` adding routines to
  check if any diagnostics will be done
- Adds a call to `PushP` just before the diagnostics are done (to get the
  updated fields from the electrostatic calculation)
- Adds the appropriate documentation

When the synchronization is to be done, `Evolve` advances the velocity a
half step just before the diagnostics and sets `is_synchronized=true`.
Then, at the start of the next step, if `is_synchronized` is true, the
velocities are pushed back a half step to be ready for the full leapfrog
advance.

Comments:
- Is the documentation in the correct place in parameters.rst?
- The reduced diagnostics could perhaps use the new DoDiags method instead
  of accessing `m_intervals` in its ComputeDiags.
- This PR leaves the original PushP unchanged, even though it is possibly
  buggy. That PushP fetches the fields, but uses the particle positions
  before the particle boundary conditions have been applied, leading to a
  possible out-of-bounds reference. Also, that PushP may not be consistent
  with the backwards PushP since the fields may have changed. Comments are
  added to the code to note this potential problem. I avoided changing
  this since it breaks many CI tests.

---------

Co-authored-by: Edoardo Zoni
Co-authored-by: Axel Huebl
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
---
 Docs/source/usage/parameters.rst               |  6 ++
 Examples/Tests/single_particle/CMakeLists.txt  | 10 ++++
 .../analysis_synchronize_velocity.py           | 60 +++++++++++++++++++
 .../inputs_test_1d_synchronize_velocity        | 36 +++++++++++
 Python/pywarpx/picmi.py                        |  9 +++
 Source/Diagnostics/MultiDiagnostics.H          |  2 +
 Source/Diagnostics/MultiDiagnostics.cpp        | 10 ++++
 .../ReducedDiags/MultiReducedDiags.H           |  3 +
 .../ReducedDiags/MultiReducedDiags.cpp         | 12 ++++
 .../Diagnostics/ReducedDiags/ReducedDiags.H    |  3 +
 Source/Evolve/WarpXEvolve.cpp                  | 29 +++++++--
 Source/WarpX.H                                 |  2 +
 Source/WarpX.cpp                               |  2 +
 13 files changed, 180 insertions(+), 4 deletions(-)
 create mode 100755 Examples/Tests/single_particle/analysis_synchronize_velocity.py
 create mode 100644 Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 735504fc7d7..c31a1cf88b9 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -2744,6 +2744,12 @@ WarpX has five types of diagnostics:
 Similar to what is done for physical species, WarpX has a class Diagnostics that allows users to initialize different diagnostics, each of them with different fields, resolution and period.
 This currently applies to standard diagnostics, but should be extended to back-transformed diagnostics and reduced diagnostics (and others) in a near future.
 
+* ``warpx.synchronize_velocity_for_diagnostics`` (``0`` or ``1``, optional, default ``0``)
+    Whether to synchronize the particle velocities with the particle positions in the diagnostics.
+    In normal operation, WarpX uses the leapfrog algorithm to advance the particles, leaving the positions and velocities unsynchronized at the end of each time step, with the velocities lagging behind by a half step.
+    When this option is turned on, whenever any diagnostics are to be calculated, the velocities will be advanced a half step to
+    synchronize them with the positions before the diagnostics are generated.
+
 .. _running-cpp-parameters-diagnostics-full:
 
 Full Diagnostics
diff --git a/Examples/Tests/single_particle/CMakeLists.txt b/Examples/Tests/single_particle/CMakeLists.txt
index fb823b39431..4282376a20f 100644
--- a/Examples/Tests/single_particle/CMakeLists.txt
+++ b/Examples/Tests/single_particle/CMakeLists.txt
@@ -10,3 +10,13 @@ add_warpx_test(
     "analysis_default_regression.py --path diags/diag1000001"  # checksum
     OFF  # dependency
 )
+
+add_warpx_test(
+    test_1d_synchronize_velocity  # name
+    1  # dims
+    1  # nprocs
+    inputs_test_1d_synchronize_velocity  # inputs
+    "analysis_synchronize_velocity.py diags/diag1000005"  # analysis
+    OFF  # checksum
+    OFF  # dependency
+)
diff --git a/Examples/Tests/single_particle/analysis_synchronize_velocity.py b/Examples/Tests/single_particle/analysis_synchronize_velocity.py
new file mode 100755
index 00000000000..b4540e7e0ba
--- /dev/null
+++ b/Examples/Tests/single_particle/analysis_synchronize_velocity.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+
+# Copyright 2025 David Grote
+#
+# This file is part of WarpX.
+#
+# License: BSD-3-Clause-LBNL
+
+import sys
+
+import numpy as np
+import yt
+
+# scipy.constants uses CODATA2022 values
+# from scipy.constants import e, m_e, c
+
+# These are CODATA2018 values, as used in WarpX
+e = 1.602176634e-19
+m_e = 9.1093837015e-31
+c = 299792458.0
+
+# Integrate the test particle for 5 timesteps, ending up with the position
+# and velocity synchronized.
+# In the simulation, with the synchronize_velocity_for_diagnostics flag set,
+# the velocity will be synchronized at the end of step 5 when the diagnostics
+# are written (even though that is not the final time step).
+
+z = 0.1
+uz = 0.0
+Ez = -1.0
+dt = 1.0e-6
+
+# Half backward advance of velocity
+uz -= -e / m_e * Ez * dt / 2.0
+
+# Leapfrog advance
+for _ in range(5):
+    uz += -e / m_e * Ez * dt
+    g = np.sqrt((uz / c) ** 2 + 1.0)
+    z += (uz / g) * dt
+
+# Add half v advance to synchronize
+uz += -e / m_e * Ez * dt / 2.0
+
+filename = sys.argv[1]
+ds = yt.load(filename)
+ad = ds.all_data()
+z_sim = ad["electron", "particle_position_x"]
+uz_sim = ad["electron", "particle_momentum_z"] / m_e
+
+print(f"Analysis   Z = {z:18.16f}, Uz = {uz:18.10f}")
+print(f"Simulation Z = {z_sim.v[0]:18.16f}, Uz = {uz_sim.v[0]:18.10f}")
+
+tolerance_rel = 1.0e-15
+error_rel = np.abs((uz - uz_sim.v[0]) / uz)
+
+print("error_rel    : " + str(error_rel))
+print("tolerance_rel: " + str(tolerance_rel))
+
+assert error_rel < tolerance_rel
diff --git a/Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity b/Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity
new file mode 100644
index 00000000000..e0e3b427150
--- /dev/null
+++ b/Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity
@@ -0,0 +1,36 @@
+max_step = 8
+amr.n_cell = 8
+amr.max_level = 0
+amr.blocking_factor = 8
+amr.max_grid_size = 8
+geometry.dims = 1
+geometry.prob_lo = 0
+geometry.prob_hi = 3
+
+# Boundary condition
+boundary.field_lo = pec
+boundary.field_hi = pec
+
+algo.maxwell_solver = none
+
+warpx.const_dt = 1.e-6
+warpx.synchronize_velocity_for_diagnostics = 1
+
+# Order of particle shape factors
+algo.particle_shape = 1
+
+particles.species_names = electron
+electron.species_type = electron
+electron.injection_style = "SingleParticle"
+electron.single_particle_pos = 0.0 0.0 0.1
+electron.single_particle_u = 0.0 0.0 0.0
+electron.single_particle_weight = 1.0
+
+# Apply a uniform Ez
+particles.E_ext_particle_init_style = constant
+particles.E_external_particle = 0.0 0.0 -1.0
+
+# Diagnostics
+diagnostics.diags_names = diag1
+diag1.intervals = 5
+diag1.diag_type = Full
diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py
index 9606fc70136..7e08acb3cec 100644
--- a/Python/pywarpx/picmi.py
+++ b/Python/pywarpx/picmi.py
@@ -2898,6 +2898,11 @@ class Simulation(picmistandard.PICMI_Simulation):
     warpx_checkpoint_signals: list of strings
         Signals on which to write out a checkpoint
 
+    warpx_synchronize_velocity: bool, default=False
+        Flags whether the particle velocities are synchronized in time with
+        the positions in the diagnostics. When False, the velocities are
+        one half step behind the positions (except for the final diagnostic).
+
     warpx_numprocs: list of ints (1 in 1D, 2 in 2D, 3 in 3D)
         Domain decomposition on the coarsest level.
         The domain will be chopped into the exact number of pieces in each dimension as specified by this parameter.
@@ -3017,6 +3022,8 @@ def init(self, kw):
         self.reduced_diags_separator = kw.pop("warpx_reduced_diags_separator", None)
         self.reduced_diags_precision = kw.pop("warpx_reduced_diags_precision", None)
 
+        self.synchronize_velocity = kw.pop("warpx_synchronize_velocity", None)
+
         self.inputs_initialized = False
         self.warpx_initialized = False
 
@@ -3081,6 +3088,8 @@ def initialize_inputs(self):
         pywarpx.warpx.break_signals = self.break_signals
         pywarpx.warpx.checkpoint_signals = self.checkpoint_signals
 
+        pywarpx.warpx.synchronize_velocity_for_diagnostics = self.synchronize_velocity
+
         pywarpx.warpx.numprocs = self.numprocs
 
         reduced_diags = pywarpx.warpx.get_bucket("reduced_diags")
diff --git a/Source/Diagnostics/MultiDiagnostics.H b/Source/Diagnostics/MultiDiagnostics.H
index a22e20b44da..d4a9e762bbc 100644
--- a/Source/Diagnostics/MultiDiagnostics.H
+++ b/Source/Diagnostics/MultiDiagnostics.H
@@ -23,6 +23,8 @@ public:
     void ReadParameters ();
     /** \brief Loop over diags in alldiags and call their InitDiags */
     void InitData ();
+    /** Check if any diagnostics will do compute and pack. */
+    bool DoComputeAndPack (int step, bool force_flush=false);
     /** \brief Called at each iteration. Compute diags and flush. */
     void FilterComputePackFlush (int step, bool force_flush=false, bool BackTransform=false);
     /** \brief Called only at the last iteration. Loop over each diag and if m_dump_last_timestep
diff --git a/Source/Diagnostics/MultiDiagnostics.cpp b/Source/Diagnostics/MultiDiagnostics.cpp
index 2119ac276f9..24285f6460a 100644
--- a/Source/Diagnostics/MultiDiagnostics.cpp
+++ b/Source/Diagnostics/MultiDiagnostics.cpp
@@ -77,6 +77,16 @@ MultiDiagnostics::ReadParameters ()
     }
 }
 
+bool
+MultiDiagnostics::DoComputeAndPack (int step, bool force_flush)
+{
+    bool result = false;
+    for( auto& diag : alldiags ){
+        result = result || diag->DoComputeAndPack(step, force_flush);
+    }
+    return result;
+}
+
 void
 MultiDiagnostics::FilterComputePackFlush (int step, bool force_flush, bool BackTransform)
 {
diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H
index 5a782db7118..bf43d0c6d69 100644
--- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H
+++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H
@@ -57,6 +57,9 @@ public:
      * @param[in] step current iteration time */
     void WriteToFile (int step);
 
+    /** Check if any diagnostics will be done */
+    bool DoDiags(int step);
+
     /** \brief Loop over all ReducedDiags and call their WriteCheckpointData
      * @param[in] dir checkpoint directory */
     void WriteCheckpointData (std::string const & dir);
diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp
index e4c982f7323..279f9a1c2ca 100644
--- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp
+++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp
@@ -161,6 +161,18 @@ void MultiReducedDiags::WriteToFile (int step)
 }
 // end void MultiReducedDiags::WriteToFile
 
+// Check if any diagnostics will be done
+bool MultiReducedDiags::DoDiags(int step)
+{
+    bool result = false;
+    for (int i_rd = 0; i_rd < static_cast<int>(m_rd_names.size()); ++i_rd)
+    {
+        result = result || m_multi_rd[i_rd] -> DoDiags(step);
+    }
+    return result;
+}
+// end bool MultiReducedDiags::DoDiags
+
 void MultiReducedDiags::WriteCheckpointData (std::string const & dir)
 {
     // Only the I/O rank does
diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.H b/Source/Diagnostics/ReducedDiags/ReducedDiags.H
index a32de30cc6f..7b61501e27c 100644
--- a/Source/Diagnostics/ReducedDiags/ReducedDiags.H
+++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.H
@@ -97,6 +97,9 @@ public:
      */
     virtual void WriteToFile (int step) const;
 
+    /** Check if diag should be done */
+    [[nodiscard]] bool DoDiags(int step) const { return m_intervals.contains(step+1); }
+
     /**
      * \brief Write out checkpoint related data
      *
diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp
index 5593642a944..5843cab0227 100644
--- a/Source/Evolve/WarpXEvolve.cpp
+++ b/Source/Evolve/WarpXEvolve.cpp
@@ -83,6 +83,13 @@ WarpX::Synchronize ()
 {
     using ablastr::fields::Direction;
     using warpx::fields::FieldType;
+    // Note that this is potentially buggy since the PushP will do a field gather
+    // using particles that have been pushed but not yet checked at the boundaries.
+    // Also, this PushP may be inconsistent with the PushP backwards above since
+    // the fields may change between the two (mainly affecting the Python version when
+    // using electrostatics).
+    // When synchronize_velocity_for_diagnostics is true, the PushP at the end of the
+    // step is used so that the correct behavior is obtained.
     FillBoundaryE(guard_cells.ng_FieldGather);
     FillBoundaryB(guard_cells.ng_FieldGather);
     if (fft_do_time_averaging)
@@ -235,9 +242,12 @@ WarpX::Evolve (int numsteps)
         }
 
         // TODO: move out
+        bool const end_of_step_loop = (step == numsteps_max - 1) || (cur_time + dt[0] >= stop_time - 1.e-3*dt[0]);
         if (evolve_scheme == EvolveScheme::Explicit) {
-            // At the end of last step, push p by 0.5*dt to synchronize
-            if (cur_time + dt[0] >= stop_time - 1.e-3*dt[0] || step == numsteps_max-1) {
+            // At the end of the step loop, push p by 0.5*dt to synchronize.
+            // This synchronization is not at the correct place since it is done before the window is moved,
+            // before particles are scraped, and before the electrostatic field update.
+            if (end_of_step_loop && !synchronize_velocity_for_diagnostics) {
                 Synchronize();
             }
         }
@@ -309,6 +319,15 @@ WarpX::Evolve (int numsteps)
             ExecutePythonCallback("afterEsolve");
         }
 
+        bool const do_diagnostic = (multi_diags->DoComputeAndPack(step) || reduced_diags->DoDiags(step));
+        if (synchronize_velocity_for_diagnostics &&
+            (do_diagnostic || end_of_step_loop)) {
+            // When the diagnostics require synchronization, push p by 0.5*dt to synchronize.
+            // Note that this will be undone at the start of the next step by the half v-push
+            // backwards.
+            Synchronize();
+        }
+
         // afterstep callback runs with the updated global time. It is included
         // in the evolve timing.
         ExecutePythonCallback("afterstep");
@@ -353,8 +372,10 @@
         // This if statement is needed for PICMI, which allows the Evolve routine to be
         // called multiple times, otherwise diagnostics will be done at every call,
         // regardless of the diagnostic period parameter provided in the inputs.
-        if (istep[0] == max_step || (stop_time - 1.e-3*dt[0] <= cur_time && cur_time < stop_time + dt[0])
-            || m_exit_loop_due_to_interrupt_signal) {
+        bool const final_time_step = (istep[0] == max_step)
+            || (cur_time >= stop_time - 1.e-3*dt[0]
+                && cur_time < stop_time + dt[0]);
+        if (final_time_step || m_exit_loop_due_to_interrupt_signal) {
             multi_diags->FilterComputePackFlushLastTimestep( istep[0] );
             if (m_exit_loop_due_to_interrupt_signal) { ExecutePythonCallback("onbreaksignal"); }
         }
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 4f6024d426d..b70d23b9e33 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -1460,6 +1460,8 @@ private:
     amrex::VisMF::Header::Version plotfile_headerversion = amrex::VisMF::Header::Version_v1;
    amrex::VisMF::Header::Version slice_plotfile_headerversion = amrex::VisMF::Header::Version_v1;
 
+    bool synchronize_velocity_for_diagnostics = false;
+
     bool use_single_read = true;
     bool use_single_write = true;
     int mffile_nstreams = 4;
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 6c06fbe97bc..f129987b991 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -926,6 +926,8 @@ WarpX::ReadParameters ()
         "J-damping can only be done when PML are inside simulation domain (do_pml_in_domain=1)"
     );
 
+    pp_warpx.query("synchronize_velocity_for_diagnostics", synchronize_velocity_for_diagnostics);
+
     {
         // Parameters below control all plotfile diagnostics
         bool plotfile_min_max = true;
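
The half-push bookkeeping described in the commit message can be written out as a standalone sketch. This is plain Python for illustration only: the loop, `push_v`, and the hard-coded diagnostic step are hypothetical stand-ins for `Evolve`, `PushP`, and the `DoDiags`/`DoComputeAndPack` checks, while the field and time step values come from the test input above.

```python
import numpy as np

# CODATA2018 values, matching the analysis script
e = 1.602176634e-19
m_e = 9.1093837015e-31

Ez = -1.0    # uniform field, from particles.E_external_particle
dt = 1.0e-6  # from warpx.const_dt

def push_v(uz, frac):
    # Advance the velocity by frac of one full field push (PushP stand-in)
    return uz + frac * (-e / m_e) * Ez * dt

uz = 0.0
is_synchronized = True  # x and v start out synchronized

for step in range(1, 6):
    if is_synchronized:
        # Backwards half push to restore the leapfrog staggering
        uz = push_v(uz, -0.5)
        is_synchronized = False
    uz = push_v(uz, 1.0)  # full leapfrog v-push
    # ... x-push, particle boundary conditions, field solve ...
    if step == 5:  # diagnostic step, as in diag1.intervals = 5
        # Forward half push so v matches x in the diagnostic output
        uz = push_v(uz, 0.5)
        is_synchronized = True

# With a uniform field, the synchronized velocity is exactly five full pushes
expected = 5 * (-e / m_e) * Ez * dt
assert np.isclose(uz, expected, rtol=1e-12)
print(f"uz = {uz:.6e}, expected = {expected:.6e}")
```

Because the applied field is uniform, the synchronized velocity after step 5 reduces to five full pushes, which is the value the analysis script compares against the simulation output.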
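From Python, the option is exposed through the new `warpx_synchronize_velocity` argument of `Simulation`. A minimal, hypothetical sketch of how it might be passed; the grid and solver choices here are generic placeholders and not part of this patch:

```python
from pywarpx import picmi

# Placeholder 1D setup; only warpx_synchronize_velocity is new in this PR
grid = picmi.Cartesian1DGrid(
    number_of_cells=[8],
    lower_bound=[0.0],
    upper_bound=[3.0],
    lower_boundary_conditions=["periodic"],
    upper_boundary_conditions=["periodic"],
)
solver = picmi.ElectrostaticSolver(grid=grid)

sim = picmi.Simulation(
    solver=solver,
    time_step_size=1.0e-6,
    max_steps=8,
    # Synchronize v with x whenever diagnostics are written
    warpx_synchronize_velocity=True,
)
sim.step(8)
```

Since the argument is popped with a default of `None`, leaving it unset should leave the input parameter unwritten, so the C++ default of `false` applies.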