-
Notifications
You must be signed in to change notification settings - Fork 200
/
Copy pathWarpX.H
1708 lines (1435 loc) · 73.3 KB
/
WarpX.H
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* Copyright 2016-2020 Andrew Myers, Ann Almgren, Aurore Blelly
* Axel Huebl, Burlen Loring, David Grote
* Glenn Richardson, Junmin Gu, Luca Fedeli
* Mathieu Lobet, Maxence Thevenet, Michael Rowan
* Remi Lehe, Revathi Jambunathan, Weiqun Zhang
* Yinjian Zhao
*
* This file is part of WarpX.
*
* License: BSD-3-Clause-LBNL
*/
#ifndef WARPX_H_
#define WARPX_H_
#include "BoundaryConditions/PEC_Insulator_fwd.H"
#include "BoundaryConditions/PML_fwd.H"
#include "Diagnostics/MultiDiagnostics_fwd.H"
#include "Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H"
#include "EmbeddedBoundary/WarpXFaceInfoBox_fwd.H"
#include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver_fwd.H"
#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H"
#include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H"
#include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel_fwd.H"
#include "Filter/NCIGodfreyFilter_fwd.H"
#include "Initialization/ExternalField_fwd.H"
#include "Particles/ParticleBoundaryBuffer_fwd.H"
#include "Particles/MultiParticleContainer_fwd.H"
#include "Particles/WarpXParticleContainer_fwd.H"
#include "Fluids/MultiFluidContainer_fwd.H"
#include "Fluids/WarpXFluidContainer_fwd.H"
#ifdef WARPX_USE_FFT
# ifdef WARPX_DIM_RZ
# include "FieldSolver/SpectralSolver/SpectralSolverRZ_fwd.H"
# include "BoundaryConditions/PML_RZ_fwd.H"
# else
# include "FieldSolver/SpectralSolver/SpectralSolver_fwd.H"
# endif
#endif
#include "AcceleratorLattice/AcceleratorLattice.H"
#include "Evolve/WarpXDtType.H"
#include "Evolve/WarpXPushType.H"
#include "Fields.H"
#include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H"
#include "FieldSolver/ImplicitSolvers/ImplicitSolver.H"
#include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H"
#include "Filter/BilinearFilter.H"
#include "Parallelization/GuardCellManager.H"
#include "Utils/Parser/IntervalsParser.H"
#include "Utils/WarpXAlgorithmSelection.H"
#include "Utils/export.H"
#include <ablastr/fields/MultiFabRegister.H>
#include <ablastr/utils/Enums.H>
#include <AMReX.H>
#include <AMReX_AmrCore.H>
#include <AMReX_Array.H>
#include <AMReX_Config.H>
#ifdef AMREX_USE_EB
# include <AMReX_EBFabFactory.H>
#endif
#include <AMReX_GpuContainers.H>
#include <AMReX_IntVect.H>
#include <AMReX_LayoutData.H>
#include <AMReX_Parser.H>
#include <AMReX_REAL.H>
#include <AMReX_RealBox.H>
#include <AMReX_RealVect.H>
#include <AMReX_Vector.H>
#include <AMReX_VisMF.H>
#include <AMReX_BaseFwd.H>
#include <AMReX_AmrCoreFwd.H>
#include <array>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>
class WARPX_EXPORT WarpX
: public amrex::AmrCore
{
public:
static WarpX& GetInstance ();
static void ResetInstance ();
/**
* \brief
* This method has to be called at the end of the simulation. It deletes the WarpX instance.
*/
static void Finalize();
/** Destructor */
~WarpX () override;
/** Copy constructor */
WarpX ( WarpX const &) = delete;
/** Copy operator */
WarpX& operator= ( WarpX const & ) = delete;
/** Move constructor */
WarpX ( WarpX && ) = delete;
/** Move operator */
WarpX& operator= ( WarpX && ) = delete;
static std::string Version (); //!< Version of WarpX executable
static std::string PicsarVersion (); //!< Version of PICSAR dependency
[[nodiscard]] int Verbose () const { return verbose; }
[[nodiscard]] const amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>& GetFieldBoundaryLo () const
{
return field_boundary_lo;
}
[[nodiscard]] const amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>& GetFieldBoundaryHi () const
{
return field_boundary_hi;
}
void InitData ();
void Evolve (int numsteps = -1);
/** Push momentum one half step forward to synchronize with position.
* Also sets is_synchronized to `true`.
*/
void Synchronize ();
//
// Functions used by implicit solvers
//
void ImplicitPreRHSOp ( amrex::Real cur_time,
amrex::Real a_full_dt,
int a_nl_iter,
bool a_from_jacobian );
void SaveParticlesAtImplicitStepStart ();
void FinishImplicitParticleUpdate ();
void SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E, amrex::Real a_time );
void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn,
amrex::Real a_thetadt, amrex::Real start_time );
void SpectralSourceFreeFieldAdvance ( amrex::Real start_time);
void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn,
amrex::Real a_theta, amrex::Real a_time );
void FinishImplicitField ( const ablastr::fields::MultiLevelVectorField& Field_fp,
const ablastr::fields::MultiLevelVectorField& Field_n,
amrex::Real theta );
void ImplicitComputeRHSE ( amrex::Real dt, WarpXSolverVec& a_Erhs_vec);
void ImplicitComputeRHSE (int lev, amrex::Real dt, WarpXSolverVec& a_Erhs_vec);
void ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real dt, WarpXSolverVec& a_Erhs_vec);
MultiParticleContainer& GetPartContainer () { return *mypc; }
MultiFluidContainer& GetFluidContainer () { return *myfl; }
ElectrostaticSolver& GetElectrostaticSolver () {return *m_electrostatic_solver;}
[[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); }
MultiDiagnostics& GetMultiDiags () {return *multi_diags;}
ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; }
amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > >& GetEBUpdateEFlag() { return m_eb_update_E; }
amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > >& GetEBUpdateBFlag() { return m_eb_update_B; }
amrex::Vector< std::unique_ptr<amrex::iMultiFab> > const & GetEBReduceParticleShapeFlag() const { return m_eb_reduce_particle_shape; }
/**
* \brief
* If an authors' string is specified in the input file, this method returns that string.
* Otherwise, it returns an empty string.
*/
[[nodiscard]] std::string GetAuthors () const { return m_authors; }
/** Maximum level up to which the externally defined electric and magnetic fields are initialized.
* The default value is set to the max levels in the simulation.
* if lev > maxlevel_extEMfield_init, the fields on those levels will have a default value of 0
*/
int maxlevel_extEMfield_init;
// Algorithms
//! Integer that corresponds to the current deposition algorithm (Esirkepov, direct, Vay, Villasenor)
static inline auto current_deposition_algo = CurrentDepositionAlgo::Default;
//! Integer that corresponds to the charge deposition algorithm (only standard deposition)
static inline auto charge_deposition_algo = ChargeDepositionAlgo::Default;
//! Integer that corresponds to the field gathering algorithm (energy-conserving, momentum-conserving)
static inline auto field_gathering_algo = GatheringAlgo::Default;
//! Integer that corresponds to the particle push algorithm (Boris, Vay, Higuera-Cary)
static inline auto particle_pusher_algo = ParticlePusherAlgo::Default;
//! Integer that corresponds to the type of Maxwell solver (Yee, CKC, PSATD, ECT)
static inline auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default;
//! Integer that corresponds to the evolve scheme (explicit, semi_implicit_em, theta_implicit_em)
EvolveScheme evolve_scheme = EvolveScheme::Default;
//! Maximum iterations used for self-consistent particle update in implicit particle-suppressed evolve schemes
static int max_particle_its_in_implicit_scheme;
//! Relative tolerance used for self-consistent particle update in implicit particle-suppressed evolve schemes
static amrex::ParticleReal particle_tol_in_implicit_scheme;
/** Records a number corresponding to the load balance cost update strategy
* being used (0 or 1 corresponding to timers or heuristic).
*/
static inline auto load_balance_costs_update_algo = LoadBalanceCostsUpdateAlgo::Default;
/** Integer that corresponds to the macroscopic Maxwell solver algorithm
* (BackwardEuler - 0, Lax-Wendroff - 1)
*/
static inline auto macroscopic_solver_algo = MacroscopicSolverAlgo::Default;
/** Boundary conditions applied to fields at the lower domain boundaries
* (Possible values PML, Periodic, PEC, PMC, Neumann, Damped, Absorbing Silver-Mueller, None)
*/
static inline amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>
field_boundary_lo {AMREX_D_DECL(FieldBoundaryType::Default,
FieldBoundaryType::Default,
FieldBoundaryType::Default)};
/** Boundary conditions applied to fields at the upper domain boundaries
* (Possible values PML, Periodic, PEC, PMC, Neumann, Damped, Absorbing Silver-Mueller, None)
*/
static inline amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>
field_boundary_hi {AMREX_D_DECL(FieldBoundaryType::Default,
FieldBoundaryType::Default,
FieldBoundaryType::Default)};
/** Integers that correspond to boundary condition applied to particles at the
* lower domain boundaries
* (0 to 4 correspond to Absorbing, Open, Reflecting, Periodic, Thermal)
*/
static inline amrex::Array<ParticleBoundaryType,AMREX_SPACEDIM>
particle_boundary_lo {AMREX_D_DECL(ParticleBoundaryType::Default,
ParticleBoundaryType::Default,
ParticleBoundaryType::Default)};
/** Integers that correspond to boundary condition applied to particles at the
* upper domain boundaries
* (0 to 4 correspond to Absorbing, Open, Reflecting, Periodic, Thermal)
*/
static inline amrex::Array<ParticleBoundaryType,AMREX_SPACEDIM>
particle_boundary_hi {AMREX_D_DECL(ParticleBoundaryType::Default,
ParticleBoundaryType::Default,
ParticleBoundaryType::Default)};
//! Integers that correspond to the time dependency of J (constant, linear)
//! and rho (linear, quadratic) for the PSATD algorithm
static inline auto J_in_time = JInTime::Default;
static inline auto rho_in_time = RhoInTime::Default;
//! If true, the current is deposited on a nodal grid and then centered onto a staggered grid
//! using finite centering of order given by #current_centering_nox, #current_centering_noy,
//! and #current_centering_noz
bool do_current_centering = false;
//! If true, a correction is applied to the current in Fourier space,
//! to satisfy the continuity equation and charge conservation
bool current_correction = true;
//! If true, the PSATD update equation for E contains both J and rho
//! (default is false for standard PSATD and true for Galilean PSATD)
bool update_with_rho = false;
//! perform field communications in single precision
static bool do_single_precision_comms;
//! used shared memory algorithm for charge deposition
static bool do_shared_mem_charge_deposition;
//! use shared memory algorithm for current deposition
static bool do_shared_mem_current_deposition;
//! number of threads to use per block in shared deposition
static int shared_mem_current_tpb;
//! tileSize to use for shared current deposition operations
static amrex::IntVect shared_tilesize;
//! Whether to fill guard cells when computing inverse FFTs of fields
static amrex::IntVect m_fill_guards_fields;
//! Whether to fill guard cells when computing inverse FFTs of currents
static amrex::IntVect m_fill_guards_current;
//! Solve additional Maxwell equation for F in order to control errors in Gauss' law
//! (useful when using current deposition algorithms that are not charge-conserving)
static bool do_dive_cleaning;
//! Solve additional Maxwell equation for G in order to control errors in magnetic Gauss' law
static bool do_divb_cleaning;
//! Order of the particle shape factors (splines) along x
static int nox;
//! Order of the particle shape factors (splines) along y
static int noy;
//! Order of the particle shape factors (splines) along z
static int noz;
//! Order of finite centering of fields (from staggered grid to nodal grid), along x
static int field_centering_nox;
//! Order of finite centering of fields (from staggered grid to nodal grid), along y
static int field_centering_noy;
//! Order of finite centering of fields (from staggered grid to nodal grid), along z
static int field_centering_noz;
//! Order of finite centering of currents (from nodal grid to staggered grid), along x
static int current_centering_nox;
//! Order of finite centering of currents (from nodal grid to staggered grid), along y
static int current_centering_noy;
//! Order of finite centering of currents (from nodal grid to staggered grid), along z
static int current_centering_noz;
//! Number of modes for the RZ multi-mode version
static int n_rz_azimuthal_modes;
//! Number of MultiFab components
//! (with the RZ multi-mode version, each mode has a real and imaginary component,
//! except mode 0 which is purely real: component 0 is mode 0, odd components are
//! the real parts, even components are the imaginary parts)
static int ncomps;
//! If true, a Numerical Cherenkov Instability (NCI) corrector is applied
//! (for simulations using the FDTD Maxwell solver)
static bool use_fdtd_nci_corr;
//! If true (Galerkin method): The fields are interpolated directly from the staggered
//! grid to the particle positions (with reduced interpolation order in the parallel
//! direction). This scheme is energy conserving in the limit of infinitely small time
//! steps.
//! Otherwise, "momentum conserving" (in the same limit): The fields are first
//! interpolated from the staggered grid points to the corners of each cell, and then
//! from the cell corners to the particle position (with the same order of interpolation
//! in all directions). This scheme is momentum conserving in the limit of infinitely
//! small time steps.
static bool galerkin_interpolation;
//! If true, a bilinear filter is used to smooth charge and currents
static bool use_filter;
//! If true, the bilinear filtering of charge and currents is done in Fourier space
static bool use_kspace_filter;
//! If true, a compensation step is added to the bilinear filtering of charge and currents
static bool use_filter_compensation;
//! If true, the initial conditions from random number generators are serialized (useful for reproducible testing with OpenMP)
static bool serialize_initial_conditions;
//! Lorentz factor of the boosted frame in which a boosted-frame simulation is run
static amrex::Real gamma_boost;
//! Beta value corresponding to the Lorentz factor of the boosted frame of the simulation
static amrex::Real beta_boost;
//! Direction of the Lorentz transform that defines the boosted frame of the simulation
static amrex::Vector<int> boost_direction;
//! store initial value of zmin_domain_boost because WarpX::computeMaxStepBoostAccelerator
//! needs the initial value of zmin_domain_boost, even if restarting from a checkpoint file
static amrex::Real zmin_domain_boost_step_0;
//! If true, the code will compute max_step from the back transformed diagnostics
static bool compute_max_step_from_btd;
static bool do_dynamic_scheduling;
static bool refine_plasma;
static utils::parser::IntervalsParser sort_intervals;
static amrex::IntVect sort_bin_size;
static bool do_multi_J;
static int do_multi_J_n_depositions;
//! With mesh refinement, particles located inside a refinement patch, but within
//! #n_field_gather_buffer cells of the edge of the patch, will gather the fields
//! from the lower refinement level instead of the refinement patch itself
static int n_field_gather_buffer;
//! With mesh refinement, particles located inside a refinement patch, but within
//! #n_current_deposition_buffer cells of the edge of the patch, will deposit their charge
//! and current onto the lower refinement level instead of the refinement patch itself
static int n_current_deposition_buffer;
//! Integer that corresponds to the type of grid used in the simulation
//! (collocated, staggered, hybrid)
static inline auto grid_type = ablastr::utils::enums::GridType::Default;
// Global rho nodal flag to know about rho index type when rho MultiFab is not allocated
amrex::IntVect m_rho_nodal_flag;
/**
* \brief
* Allocate and optionally initialize the iMultiFab. This also adds the iMultiFab
* to the map of MultiFabs (used to ease the access to MultiFabs from the Python
* interface)
*
* \param[out] mf The iMultiFab unique pointer to be allocated
* \param[in] ba The BoxArray describing the iMultiFab
* \param[in] dm The DistributionMapping describing the iMultiFab
* \param[in] ncomp The number of components in the iMultiFab
* \param[in] ngrow The number of guard cells in the iMultiFab
* \param[in] level The refinement level
* \param[in] name The name of the iMultiFab to use in the map
* \param[in] initial_value The optional initial value
*/
void AllocInitMultiFab (
std::unique_ptr<amrex::iMultiFab>& mf,
const amrex::BoxArray& ba,
const amrex::DistributionMapping& dm,
int ncomp,
const amrex::IntVect& ngrow,
int level,
const std::string& name,
std::optional<const int> initial_value = {});
// Maps of all of the iMultiFabs used (this can include MFs from other classes)
// This is a convenience for the Python interface, allowing all iMultiFabs
// to be easily referenced from Python.
std::map<std::string, amrex::iMultiFab *> imultifab_map;
/**
* \brief
* Get pointer to the amrex::MultiFab containing the dotMask for the specified field
*/
[[nodiscard]] const amrex::iMultiFab*
getFieldDotMaskPointer (warpx::fields::FieldType field_type, int lev, ablastr::fields::Direction dir) const;
[[nodiscard]] bool DoPML () const {return do_pml;}
[[nodiscard]] bool DoFluidSpecies () const {return do_fluid_species;}
#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT)
const PML_RZ* getPMLRZ() {return pml_rz[0].get();}
#endif
/** get low-high-low-high-... vector for each direction indicating if mother grid PMLs are enabled */
[[nodiscard]] std::vector<bool> getPMLdirections() const;
static amrex::LayoutData<amrex::Real>* getCosts (int lev);
void setLoadBalanceEfficiency (int lev, amrex::Real efficiency);
amrex::Real getLoadBalanceEfficiency (int lev);
static amrex::IntVect filter_npass_each_dir;
BilinearFilter bilinear_filter;
amrex::Vector< std::unique_ptr<NCIGodfreyFilter> > nci_godfrey_filter_exeybz;
amrex::Vector< std::unique_ptr<NCIGodfreyFilter> > nci_godfrey_filter_bxbyez;
amrex::Real time_of_last_gal_shift = 0;
amrex::Vector<amrex::Real> m_v_galilean = amrex::Vector<amrex::Real>(3, amrex::Real(0.));
amrex::Array<amrex::Real,3> m_galilean_shift = {{0}};
amrex::Vector<amrex::Real> m_v_comoving = amrex::Vector<amrex::Real>(3, amrex::Real(0.));
/// object with all reduced diagnostics, similar to MultiParticleContainer for species.
std::unique_ptr<MultiReducedDiags> reduced_diags;
void applyMirrors(amrex::Real time);
/** Determine the timestep of the simulation. */
void ComputeDt ();
/**
* Determine the simulation timestep from the maximum speed of all particles
* Sets timestep so that a particle can only cross cfl*dx cells per timestep.
*/
void UpdateDtFromParticleSpeeds ();
/** Print main PIC parameters to stdout */
void PrintMainPICparameters ();
/** Write a file that record all inputs: inputs file + command line options */
void WriteUsedInputsFile () const;
/**
* \brief
* Compute the last time step of the simulation
* Calls computeMaxStepBoostAccelerator() if required.
*/
void ComputeMaxStep ();
// Compute max_step automatically for simulations in a boosted frame.
void computeMaxStepBoostAccelerator();
/** \brief Move the moving window
* \param step Time step
* \param move_j whether the current (and the charge, if allocated) is shifted or not
*/
int MoveWindow (int step, bool move_j);
/**
* \brief
* This function shifts the boundary of the grid by 'm_v_galilean*dt'.
* In doing so, only position attributes are changed while fields remain unchanged.
*/
void ShiftGalileanBoundary ();
/**
* \brief Update injection position for continuous injection of
* particles and lasers (loops over species and lasers).
*/
void UpdateInjectionPosition (amrex::Real dt);
void ResetProbDomain (const amrex::RealBox& rb);
void EvolveE ( amrex::Real dt, amrex::Real start_time);
void EvolveE (int lev, amrex::Real dt, amrex::Real start_time);
void EvolveB ( amrex::Real dt, DtType dt_type, amrex::Real start_time);
void EvolveB (int lev, amrex::Real dt, DtType dt_type, amrex::Real start_time);
void EvolveF ( amrex::Real dt, DtType dt_type);
void EvolveF (int lev, amrex::Real dt, DtType dt_type);
void EvolveG ( amrex::Real dt, DtType dt_type);
void EvolveG (int lev, amrex::Real dt, DtType dt_type);
void EvolveB (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type, amrex::Real start_time);
void EvolveE (int lev, PatchType patch_type, amrex::Real dt, amrex::Real start_time);
void EvolveF (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type);
void EvolveG (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type);
void MacroscopicEvolveE ( amrex::Real dt, amrex::Real start_time);
void MacroscopicEvolveE (int lev, amrex::Real dt, amrex::Real start_time);
void MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real dt, amrex::Real start_time);
/**
* \brief Hybrid-PIC field evolve function.
* This function contains the logic involved in evolving the electric and
* magnetic fields in the hybrid PIC algorithm.
*/
void HybridPICEvolveFields ();
/**
* \brief Hybrid-PIC initial deposition function.
* The hybrid-PIC algorithm uses the charge and current density from both
* the current and previous step when updating the fields. This function
* deposits the initial ion charge and current (before the first particle
* push), to be used in the ``HybridPICEvolveFields()`` function.
*/
void HybridPICDepositInitialRhoAndJ ();
/** apply QED correction on electric field
*
* \param dt vector of time steps (for all levels)
*/
void Hybrid_QED_Push ( amrex::Vector<amrex::Real> dt);
/** apply QED correction on electric field for level lev
*
* \param lev mesh refinement level
* \param dt time step
*/
void Hybrid_QED_Push (int lev, amrex::Real dt);
/** apply QED correction on electric field for level lev and patch type patch_type
*
* \param lev mesh refinement level
* \param patch_type which MR patch: PatchType::fine or PatchType::coarse
* \param dt time step
*/
void Hybrid_QED_Push (int lev, PatchType patch_type, amrex::Real dt);
amrex::Real m_quantum_xi_c2;
/** Check and potentially compute load balancing
*/
void CheckLoadBalance (int step);
/** \brief perform load balance; compute and communicate new `amrex::DistributionMapping`
*/
void LoadBalance ();
/** \brief resets costs to zero
*/
void ResetCosts ();
/** Perform running average of the LB costs
*
* Only needed for timers cost update, heuristic load balance considers the
* instantaneous costs.
* This gives more importance to most recent costs.
*/
void RescaleCosts (int step);
/** \brief returns the load balance interval
*/
[[nodiscard]] utils::parser::IntervalsParser get_load_balance_intervals () const
{
return load_balance_intervals;
}
/**
* \brief Private function for spectral solver
* Applies a damping factor in the guards cells that extend
* beyond the extent of the domain, reducing fluctuations that
* can appear in parallel simulations. This will be called
* when FieldBoundaryType is set to damped. Vector version.
*/
void DampFieldsInGuards (int lev,
const ablastr::fields::VectorField& Efield,
const ablastr::fields::VectorField& Bfield);
/**
* \brief Private function for spectral solver
* Applies a damping factor in the guards cells that extend
* beyond the extent of the domain, reducing fluctuations that
* can appear in parallel simulations. This will be called
* when FieldBoundaryType is set to damped. Scalar version.
*/
void DampFieldsInGuards (int lev, amrex::MultiFab* mf);
#ifdef WARPX_DIM_RZ
void ApplyInverseVolumeScalingToCurrentDensity(amrex::MultiFab* Jx,
amrex::MultiFab* Jy,
amrex::MultiFab* Jz,
int lev) const;
void ApplyInverseVolumeScalingToChargeDensity(amrex::MultiFab* Rho,
int lev) const;
#endif
/**
* \brief If a PEC boundary conditions is used the charge
* density deposited in the guard cells have to be reflected back into
* the simulation domain. This function performs that logic.
*/
void ApplyRhofieldBoundary (int lev, amrex::MultiFab* Rho,
PatchType patch_type);
/**
* \brief If a PEC boundary conditions is used the current
* density deposited in the guard cells have to be reflected back into
* the simulation domain. This function performs that logic.
*/
void ApplyJfieldBoundary (int lev, amrex::MultiFab* Jx,
amrex::MultiFab* Jy, amrex::MultiFab* Jz,
PatchType patch_type);
void ApplyEfieldBoundary (int lev, PatchType patch_type, amrex::Real cur_time);
void ApplyBfieldBoundary (int lev, PatchType patch_type, DtType dt_type, amrex::Real cur_time);
#ifdef WARPX_DIM_RZ
// Applies the boundary conditions that are specific to the axis when in RZ.
void ApplyFieldBoundaryOnAxis (amrex::MultiFab* Er, amrex::MultiFab* Et, amrex::MultiFab* Ez, int lev) const;
#endif
/**
* \brief When the Ohm's law solver is used, the electron pressure values
* on PEC boundaries are set to enforce a zero derivative Neumann condition,
* otherwise the E_perp values have large spikes (that grows as 1/dx). This
* has to be done since the E-field is algebraically calculated instead of
* evolved which means the 1/dx growth is not avoided by multiplication
* with dt as happens in the B-field evolve.
*/
void ApplyElectronPressureBoundary (int lev, PatchType patch_type);
void DampPML ();
void DampPML (int lev);
void DampPML (int lev, PatchType patch_type);
void DampPML_Cartesian (int lev, PatchType patch_type);
void DampJPML ();
void DampJPML (int lev);
void DampJPML (int lev, PatchType patch_type);
void CopyJPML ();
/** True if any of the particle boundary condition type is Thermal */
static bool isAnyParticleBoundaryThermal();
PML* GetPML (int lev);
#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT)
PML_RZ* GetPML_RZ (int lev);
#endif
/** Run the ionization module on all species */
void doFieldIonization ();
#ifdef WARPX_QED
/** Run the QED module on all species */
void doQEDEvents ();
#endif
void PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type=DtType::Full, bool skip_current=false,
PushType push_type=PushType::Explicit);
void PushParticlesandDeposit (amrex::Real cur_time, bool skip_current=false,
PushType push_type=PushType::Explicit);
// This function does aux(lev) = fp(lev) + I(aux(lev-1)-cp(lev)).
// Caller must make sure fp and cp have ghost cells filled.
void UpdateAuxilaryData ();
void UpdateAuxilaryDataStagToNodal ();
void UpdateAuxilaryDataSameType ();
/**
* \brief This function is called if \c warpx.do_current_centering = 1 and
* it centers the currents from a nodal grid to a staggered grid (Yee) using
* finite-order interpolation based on the Fornberg coefficients.
*
* \param[in,out] dst destination \c MultiFab where the results of the finite-order centering are stored
* \param[in] src source \c MultiFab that contains the values of the nodal current to be centered
*/
void UpdateCurrentNodalToStag (amrex::MultiFab& dst, amrex::MultiFab const& src);
// Fill boundary cells including coarse/fine boundaries
void FillBoundaryB (amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryE (amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryB_avg (amrex::IntVect ng);
void FillBoundaryE_avg (amrex::IntVect ng);
void FillBoundaryF (amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryG (amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryAux (amrex::IntVect ng);
void FillBoundaryE (int lev, amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryB (int lev, amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryE_avg (int lev, amrex::IntVect ng);
void FillBoundaryB_avg (int lev, amrex::IntVect ng);
void FillBoundaryF (int lev, amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryG (int lev, amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);
void FillBoundaryAux (int lev, amrex::IntVect ng);
/**
 * \brief Synchronize J and rho:
 * filter (if used), exchange guard cells, interpolate across MR levels
 * and apply boundary conditions.
 * Contains separate calls to WarpX::SyncCurrent and WarpX::SyncRho.
 */
void SyncCurrentAndRho ();
/**
 * \brief Apply filter and sum guard cells across MR levels.
 * If current centering is used, center the current from a nodal grid
 * to a staggered grid. For each MR level beyond level 0, interpolate
 * the fine-patch current onto the coarse-patch current at the same level.
 * Then, for each MR level, including level 0, apply filter and sum guard
 * cells across levels.
 *
 * \param[in] current_fp_string the coarse or fine patch to use for current
 */
void SyncCurrent (const std::string& current_fp_string);
/** \brief Synchronize rho; presumably forwards the class-registered
 * fine/coarse/buffer charge fields to the overload below -- confirm in .cpp. */
void SyncRho ();
/**
 * \brief Synchronize the given multi-level charge-density fields:
 * filter (if used), exchange guard cells and interpolate across MR levels.
 *
 * \param charge_fp fine-patch charge density on all levels
 * \param charge_cp coarse-patch charge density on all levels
 * \param charge_buffer buffer charge density on all levels
 */
void SyncRho (
const ablastr::fields::MultiLevelScalarField& charge_fp,
const ablastr::fields::MultiLevelScalarField& charge_cp,
ablastr::fields::MultiLevelScalarField const & charge_buffer);
// --- Accessors/mutators for per-level time-stepping state ---
// NOTE: the Vector-returning getters return a copy of the per-level data,
// not a reference.
[[nodiscard]] amrex::Vector<int> getnsubsteps () const {return nsubsteps;}
/** Number of subcycling substeps at level \c lev. */
[[nodiscard]] int getnsubsteps (int lev) const {return nsubsteps[lev];}
[[nodiscard]] amrex::Vector<int> getistep () const {return istep;}
/** Current step index at level \c lev. */
[[nodiscard]] int getistep (int lev) const {return istep[lev];}
/** Set the step index at level \c lev to \c ii. */
void setistep (int lev, int ii) {istep[lev] = ii;}
[[nodiscard]] amrex::Vector<amrex::Real> gett_old () const {return t_old;}
/** Previous simulation time at level \c lev. */
[[nodiscard]] amrex::Real gett_old (int lev) const {return t_old[lev];}
[[nodiscard]] amrex::Vector<amrex::Real> gett_new () const {return t_new;}
/** Current simulation time at level \c lev. */
[[nodiscard]] amrex::Real gett_new (int lev) const {return t_new[lev];}
/** Set the simulation time at level \c lev to \c time. */
void sett_new (int lev, amrex::Real time) {t_new[lev] = time;}
[[nodiscard]] amrex::Vector<amrex::Real> getdt () const {return dt;}
/** Time step at level \c lev.
 * NOTE: uses bounds-checked \c at(), unlike the other per-level getters. */
[[nodiscard]] amrex::Real getdt (int lev) const {return dt.at(lev);}
/** Whether the moving window is enabled (int used as a boolean). */
[[nodiscard]] int getdo_moving_window() const {return do_moving_window;}
/** Current position of the moving window. */
[[nodiscard]] amrex::Real getmoving_window_x() const {return moving_window_x;}
[[nodiscard]] bool getis_synchronized() const {return is_synchronized;}
/** Maximum number of time steps to run. */
[[nodiscard]] int maxStep () const {return max_step;}
void updateMaxStep (const int new_max_step) {max_step = new_max_step;}
/** Simulation stop time. */
[[nodiscard]] amrex::Real stopTime () const {return stop_time;}
void updateStopTime (const amrex::Real new_stop_time) {stop_time = new_stop_time;}
/** \brief Cell sizes (dx, dy, dz) at refinement level \c lev. */
static std::array<amrex::Real,3> CellSize (int lev);
/** \brief Inverse cell sizes at refinement level \c lev. */
static amrex::XDim3 InvCellSize (int lev);
/** \brief Physical (real-space) box corresponding to index box \c bx at level \c lev. */
static amrex::RealBox getRealBox(const amrex::Box& bx, int lev);
/**
 * \brief Return the lower corner of the box in real units.
 * \param bx The box
 * \param lev The refinement level of the box
 * \param time_shift_delta The time relative to the current time at which to calculate the position
 * (when v_galilean is not zero)
 * \return An array of the position coordinates
 */
static amrex::XDim3 LowerCorner (const amrex::Box& bx, int lev, amrex::Real time_shift_delta);
/**
 * \brief Return the upper corner of the box in real units.
 * \param bx The box
 * \param lev The refinement level of the box
 * \param time_shift_delta The time relative to the current time at which to calculate the position
 * (when v_galilean is not zero)
 * \return An array of the position coordinates
 */
static amrex::XDim3 UpperCorner (const amrex::Box& bx, int lev, amrex::Real time_shift_delta);
/** \brief Refinement ratio at level \c lev
 * (presumably the AMReX convention: ratio between \c lev and \c lev+1 -- confirm). */
static amrex::IntVect RefRatio (int lev);
/** \brief Mask selecting where current is deposited into buffers at level \c lev
 * -- confirm exact semantics in the .cpp. */
static const amrex::iMultiFab* CurrentBufferMasks (int lev);
/** \brief Mask selecting where fields are gathered from buffers at level \c lev
 * -- confirm exact semantics in the .cpp. */
static const amrex::iMultiFab* GatherBufferMasks (int lev);
//! Selected electrostatic solver algorithm (see \c ElectrostaticSolverAlgo)
static inline auto electrostatic_solver_id = ElectrostaticSolverAlgo::Default;
//! Selected Poisson solver algorithm (see \c PoissonSolverAlgo)
static inline auto poisson_solver_id = PoissonSolverAlgo::Default;
static int do_moving_window; // boolean flag stored as int (0 = disabled)
static int start_moving_window_step; // the first step to move window
static int end_moving_window_step; // the last step to move window; negative means "never stop" (see moving_window_active)
/** Query whether the moving window is active at the given time step.
 *
 * The window is active when it is enabled, the step has reached the
 * configured start step, and the end step (if any) has not yet been
 * reached. A negative end step means the window never stops moving.
 *
 * @param step time step
 * @return true if active, else false
 */
static int moving_window_active (int const step) {
    if (!do_moving_window) { return false; }
    bool const has_started   = (step >= start_moving_window_step);
    bool const not_yet_ended = (end_moving_window_step < 0) || (step < end_moving_window_step);
    return has_started && not_yet_ended;
}
static int moving_window_dir; // coordinate direction along which the window moves
static amrex::Real moving_window_v; // moving-window speed
static bool fft_do_time_averaging; // whether time averaging is used (see *_avg fields)
// these should be private, but can't due to Cuda limitations
/** \brief Compute div(B) of field \c B into component \c dcomp of \c divB,
 * given cell sizes \c dx. */
static void ComputeDivB (amrex::MultiFab& divB, int dcomp,
ablastr::fields::VectorField const & B,
const std::array<amrex::Real,3>& dx);
/** \brief Overload of ComputeDivB that also operates on \c ngrow guard cells. */
static void ComputeDivB (amrex::MultiFab& divB, int dcomp,
ablastr::fields::VectorField const & B,
const std::array<amrex::Real,3>& dx, amrex::IntVect ngrow);
/** \brief Compute div(E) at refinement level \c lev into \c divE. */
void ComputeDivE(amrex::MultiFab& divE, int lev);
/** \brief Clean the divergence of B via a projection step (see .cpp for details). */
void ProjectionCleanDivB ();
/** \brief Compute the curl of the external vector potential A -- see .cpp for details. */
void CalculateExternalCurlA ();
// Guard-cell counts used for the various field operations (from the guard_cells member).
[[nodiscard]] amrex::IntVect getngEB() const { return guard_cells.ng_alloc_EB; }
[[nodiscard]] amrex::IntVect getngF() const { return guard_cells.ng_alloc_F; }
[[nodiscard]] amrex::IntVect getngUpdateAux() const { return guard_cells.ng_UpdateAux; }
[[nodiscard]] amrex::IntVect get_ng_depos_J() const {return guard_cells.ng_depos_J;}
[[nodiscard]] amrex::IntVect get_ng_depos_rho() const {return guard_cells.ng_depos_rho;}
[[nodiscard]] amrex::IntVect get_ng_fieldgather () const {return guard_cells.ng_FieldGather;}
/** Coarsest-level Domain Decomposition
 *
 * If specified, the domain will be chopped into the exact number
 * of pieces in each dimension as specified by this parameter.
 *
 * @return the number of MPI processes per dimension if specified, otherwise a 0-vector
 */
[[nodiscard]] amrex::IntVect get_numprocs() const {return numprocs;}
/** Electrostatic solve call
 *
 * @param reset_fields presumably whether to zero the fields before solving -- confirm in .cpp
 */
void ComputeSpaceChargeField (bool reset_fields);
// Magnetostatic Solver Interface
//! Boundary handler for the vector Poisson solve
MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler;
//! Maximum number of iterations of the magnetostatic (vector Poisson) solver
int magnetostatic_solver_max_iters = 200;
//! Verbosity level of the magnetostatic solver
int magnetostatic_solver_verbosity = 2;
/** \brief Run the magnetostatic solve -- see .cpp for details. */
void ComputeMagnetostaticField ();
/** \brief Add the magnetostatic field contribution in the lab frame -- see .cpp. */
void AddMagnetostaticFieldLabFrame ();
/**
 * \brief Compute the vector potential \c A from the current \c curr
 * by solving a vector Poisson problem.
 *
 * \param curr multi-level current density (input)
 * \param A multi-level vector potential to be computed
 * \param required_precision relative tolerance of the solve
 * \param absolute_tolerance absolute tolerance of the solve
 * \param max_iters maximum number of solver iterations
 * \param verbosity solver verbosity level
 */
void computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr,
ablastr::fields::MultiLevelVectorField const& A,
amrex::Real required_precision=amrex::Real(1.e-11),
amrex::Real absolute_tolerance=amrex::Real(0.0),
int max_iters=200,
int verbosity=2); // const;
/** \brief Apply boundary conditions to the vector potential \c A. */
void setVectorPotentialBC (ablastr::fields::MultiLevelVectorField const& A) const;
/**
 * \brief
 * This function computes the E, B, and J fields on each level
 * using the parser and the user-defined function for the external fields.
 * The subroutine will parse the x_/y_/z_external_grid_function and
 * then, the field multifab is initialized based on the (x,y,z) position
 * on the staggered yee-grid or cell-centered grid, in the interior cells
 * and guard cells.
 *
 * \param[in] field FieldType to grab from register to write into
 * \param[in] fx_parser parser function to initialize x-field
 * \param[in] fy_parser parser function to initialize y-field
 * \param[in] fz_parser parser function to initialize z-field
 * \param[in] lev level of the Multifabs that is initialized
 * \param[in] patch_type PatchType on which the field is initialized (fine or coarse)
 * \param[in] eb_update_field flag indicating which gridpoints should be modified by this function
 * \param[in] use_eb_flags (default:true) flag indicating if eb points should be excluded or not
 */
void ComputeExternalFieldOnGridUsingParser (
warpx::fields::FieldType field,
amrex::ParserExecutor<4> const& fx_parser,
amrex::ParserExecutor<4> const& fy_parser,
amrex::ParserExecutor<4> const& fz_parser,
int lev, PatchType patch_type,
amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field,
bool use_eb_flags);
/** \brief Overload without \c use_eb_flags; presumably defaults to
 * excluding EB points (use_eb_flags = true) -- confirm in the .cpp. */
void ComputeExternalFieldOnGridUsingParser (
warpx::fields::FieldType field,
amrex::ParserExecutor<4> const& fx_parser,
amrex::ParserExecutor<4> const& fy_parser,
amrex::ParserExecutor<4> const& fz_parser,
int lev, PatchType patch_type,
amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field);
/**
 * \brief
 * This function computes the E, B, and J fields on each level
 * using the parser and the user-defined function for the external fields.
 * The subroutine will parse the x_/y_/z_external_grid_function and
 * then, the field multifab is initialized based on the (x,y,z) position
 * on the staggered yee-grid or cell-centered grid, in the interior cells
 * and guard cells.
 *
 * (NOTE: stale \param entries for edge_lengths, face_areas and topology
 * were removed -- those parameters do not exist in this signature.)
 *
 * \param[in] field string containing field name to grab from register
 * \param[in] fx_parser parser function to initialize x-field
 * \param[in] fy_parser parser function to initialize y-field
 * \param[in] fz_parser parser function to initialize z-field
 * \param[in] lev level of the Multifabs that is initialized
 * \param[in] patch_type PatchType on which the field is initialized (fine or coarse)
 * \param[in] eb_update_field flag indicating which gridpoints should be modified by this function
 * \param[in] use_eb_flags (default:true) flag indicating if eb points should be excluded or not
 */
void ComputeExternalFieldOnGridUsingParser (
std::string const& field,
amrex::ParserExecutor<4> const& fx_parser,
amrex::ParserExecutor<4> const& fy_parser,
amrex::ParserExecutor<4> const& fz_parser,
int lev, PatchType patch_type,
amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3> > const& eb_update_field,
bool use_eb_flags);
/** \brief Overload without \c use_eb_flags; presumably defaults to
 * excluding EB points (use_eb_flags = true) -- confirm in the .cpp. */
void ComputeExternalFieldOnGridUsingParser (
std::string const& field,
amrex::ParserExecutor<4> const& fx_parser,
amrex::ParserExecutor<4> const& fy_parser,
amrex::ParserExecutor<4> const& fz_parser,
int lev, PatchType patch_type,
amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3> > const& eb_update_field);
/**
 * \brief Load field values from a user-specified openPMD file,
 * for the fields Ex, Ey, Ez, Bx, By, Bz
 *
 * \param[in] lev refinement level on which the fields are loaded
 */
void LoadExternalFields (int lev);
/**
 * \brief Load field values from a user-specified openPMD file
 * for a specific field (specified by `F_name`)
 *
 * \param[in] read_fields_from_path path of the openPMD file to read
 * \param[in,out] mf MultiFab to fill with the values read from file
 * \param[in] F_name name of the field to read
 * \param[in] F_component component of the field to read
 */
void ReadExternalFieldFromFile (
const std::string& read_fields_from_path, amrex::MultiFab* mf,
const std::string& F_name, const std::string& F_component);
/**
 * \brief
 * This function initializes and calculates grid quantities used along with
 * EBs such as edge lengths, face areas, distance to EB, etc. It also
 * appropriately communicates EB data to guard cells.
 *
 * \param[in] lev level of the Multifabs that is initialized
 */
void InitializeEBGridData(int lev);
/** \brief adds particle and cell contributions in cells to compute heuristic
 * cost in each box on each level, and records in `costs`
 * @param[in] costs vector of (`unique_ptr` to) vectors; expected to be initialized
 * to correct number of boxes and boxes per level
 */
void ComputeCostsHeuristic (amrex::Vector<std::unique_ptr<amrex::LayoutData<amrex::Real> > >& costs);
/**
 * \brief Apply filter and sum guard cells of \c rho -- see .cpp for exact semantics.
 *
 * \param[in] lev level of \c rho
 * \param[in] glev level of the grids used for the guard-cell sum -- confirm in .cpp
 * \param[in,out] rho charge-density MultiFab to filter and sum
 * \param[in] icomp first component to operate on
 * \param[in] ncomp number of components to operate on
 */
void ApplyFilterandSumBoundaryRho (int lev, int glev, amrex::MultiFab& rho, int icomp, int ncomp);
// Device vectors of stencil coefficients used for finite-order centering of fields
amrex::Gpu::DeviceVector<amrex::Real> device_field_centering_stencil_coeffs_x;
amrex::Gpu::DeviceVector<amrex::Real> device_field_centering_stencil_coeffs_y;
amrex::Gpu::DeviceVector<amrex::Real> device_field_centering_stencil_coeffs_z;
// Device vectors of stencil coefficients used for finite-order centering of currents
amrex::Gpu::DeviceVector<amrex::Real> device_current_centering_stencil_coeffs_x;
amrex::Gpu::DeviceVector<amrex::Real> device_current_centering_stencil_coeffs_y;
amrex::Gpu::DeviceVector<amrex::Real> device_current_centering_stencil_coeffs_z;
// This needs to be public for CUDA.
//! Tagging cells for refinement (declared \c final, overriding the base-class virtual)
void ErrorEst (int lev, amrex::TagBoxArray& tags, amrex::Real time, int /*ngrow*/) final;
// Return the accelerator lattice instance defined at the given refinement level.
// NOTE: dereferences the stored pointer without a null check; callers must
// ensure the lattice exists at this level.
const AcceleratorLattice& get_accelerator_lattice (int lev) {return *(m_accelerator_lattice[lev]);}
// for cuda
// Build the buffer mask in box \c tbx from \c guard_mask using \c ng guard
// cells -- see .cpp for the exact stencil.
void BuildBufferMasksInBox ( amrex::Box tbx, amrex::IArrayBox &buffer_mask,
const amrex::IArrayBox &guard_mask, int ng );
#ifdef AMREX_USE_EB
/** \brief Field factory at level \c lev, downcast to the EB-aware factory type.
 * The \c static_cast is unchecked: only valid if the stored factory really is
 * an \c EBFArrayBoxFactory (presumably guaranteed when EB support is compiled in).
 */
[[nodiscard]] amrex::EBFArrayBoxFactory const& fieldEBFactory (int lev) const noexcept {
return static_cast<amrex::EBFArrayBoxFactory const&>(*m_field_factory[lev]);
}
#endif
/** \brief Initialize the embedded-boundary (EB) data structures. */
void InitEB ();
/**
 * \brief Compute the level set function used for particle-boundary interaction.
 */
void ComputeDistanceToEB ();
/**
 * \brief Auxiliary function to count the number of faces which still need to be extended
 */
amrex::Array1D<int, 0, 2> CountExtFaces();
/**
 * \brief Main function computing the cell extension. Where possible it computes one-way
 * extensions and, when this is not possible, it does eight-ways extensions.
 */
void ComputeFaceExtensions();
/**
 * \brief Initialize the memory for the FaceInfoBoxes
 */
void InitBorrowing();
/**
 * \brief Shrink the vectors in the FaceInfoBoxes
 */
void ShrinkBorrowing();
/**