diff --git a/HySoP/src/scalesInterface/particles/advec_common_group.f90 b/HySoP/src/scalesInterface/particles/advec_common_group.f90
deleted file mode 100644
index b34991e46fe2c8a1b250df8af6082a6f0d6bb665..0000000000000000000000000000000000000000
--- a/HySoP/src/scalesInterface/particles/advec_common_group.f90
+++ /dev/null
@@ -1,43 +0,0 @@
-!> @addtogroup part
-!! @{
-!------------------------------------------------------------------------------
-!
-! MODULE: advec_common
-!
-!
-! DESCRIPTION:
-!> The module ``advec_common'' gathers functions and subroutines used to advect a scalar
-!! which are not specific to a direction
-!! @details
-!! This module gathers functions and routines used to advect a scalar which are not
-!! specific to a direction. This is a parallel implementation using MPI and
-!! the Cartesian topology it provides. It also contains the variables common to
-!! the solver along each direction and other generic variables used for the
-!! advection based on the particle method.
-!!
-!! Except for testing purposes, this module is not supposed to be used by the
-!! main code but only by the other advection modules. More precisely, a final user
-!! must only use the generic "advec" module, which contains all the interfaces to
-!! solve the advection equation with the particle method, and to choose the
-!! remeshing formula, the dimensional splitting and everything else.
-!!
-!! The module "test_advec" can be used in order to validate the procedures
-!! embedded in this module.
-!
-!> @author
-!! Jean-Baptiste Lagaert, LEGI
-!
-!------------------------------------------------------------------------------
-
-module advec_common
-
-    ! Velocity interpolation at particle position
-    use advec_common_velo ,only:AC_velocity_interpol_group, AC_velocity_interpol_no_com
-    ! Particles remeshing
-    use advec_common_remesh,only: AC_setup_init,        &
-        & AC_setup_alongX, AC_setup_alongY, AC_setup_alongZ,&
-        & AC_remesh_lambda_group, AC_remesh_limit_lambda_group, AC_remesh_Mprime_group
-
-    implicit none
-
-end module advec_common
diff --git a/HySoP/src/scalesInterface/particles/advec_common_remesh.f90 b/HySoP/src/scalesInterface/particles/advec_common_remesh.f90
deleted file mode 100644
index 61014df656f06d594f9b00f693a29dec2eec8da6..0000000000000000000000000000000000000000
--- a/HySoP/src/scalesInterface/particles/advec_common_remesh.f90
+++ /dev/null
@@ -1,1557 +0,0 @@
-!> @addtogroup part
-!! @{
-!------------------------------------------------------------------------------
-!
-! MODULE: advec_common_remesh
-!
-!
-! DESCRIPTION:
-!> The module ``advec_common_remesh'' gathers functions and subroutines used to advect a scalar
-!! which are not specific to a direction
-!! @details
-!! This module gathers functions and routines used to advect a scalar which are not
-!! specific to a direction. This is a parallel implementation using MPI and
-!! the Cartesian topology it provides. It also contains the variables common to
-!! the solver along each direction and other generic variables used for the
-!! advection based on the particle method.
-!!
-!! Except for testing purposes, this module is not supposed to be used by the
-!! main code but only by the other advection modules. More precisely, a final user
-!! must only use the generic "advec" module, which contains all the interfaces to
-!! solve the advection equation with the particle method, and to choose the
-!! remeshing formula, the dimensional splitting and everything else.
-!!
-!! The module "test_advec" can be used in order to validate the procedures
-!! embedded in this module.
-!
-!> @author
-!! Jean-Baptiste Lagaert, LEGI
-!
-!------------------------------------------------------------------------------
-
-module advec_common_remesh
-
-    use precision_tools
-    use advec_abstract_proc
-
-    implicit none
-
-
-    ! Information about the particles and their block
-    public
-
-
-    ! ===== Public procedures =====
-    !----- Init remeshing context -----
-    public  :: AC_setup_init
-    public  :: AC_setup_alongX
-    public  :: AC_setup_alongY
-    public  :: AC_setup_alongZ
-    !----- To remesh particles -----
-    public  :: AC_remesh_lambda_group
-    public  :: AC_remesh_Mprime_group
-    !----- Tools to remesh particles -----
-    public  :: AC_remesh_range
-    public  :: AC_remesh_determine_communication
-    public  :: AC_remesh_cartography
-
-    ! ===== Private procedures =====
-    !----- Prepare and perform communication required during remeshing -----
-    private :: AC_remesh_init
-    private :: AC_remesh_finalize
-
-    ! ===== Public variables =====
-
-    ! ===== Private variables =====
-    !> Pointer to the subroutine which remeshes particles into a buffer - for formulae of the lambda family (with tag/type).
-    procedure(remesh_in_buffer_type), pointer, private      :: remesh_in_buffer_lambda_pt => null()
-    !> Pointer to the subroutine which remeshes particles into a buffer - for limited formulae of the lambda family (with tag/type).
-    procedure(remesh_in_buffer_limit), pointer, private     :: remesh_in_buffer_limit_lambda_pt => null()
-    !> Pointer to the subroutine which remeshes particles into a buffer - for formulae of the M' family (without tag/type).
-    procedure(remesh_in_buffer_notype), pointer, private    :: remesh_in_buffer_Mprime_pt => null()
-    !> Pointer to the subroutine which redistributes a buffer (containing remeshed
-    !! particles) into the original scalar field.
-    procedure(remesh_buffer_to_scalar), pointer, private    :: remesh_buffer_to_scalar_pt => null()
-    !> Pointer to the subroutine which computes the scalar slope along the current
-    !! direction and then computes the limitator function (divided by 8)
-    procedure(advec_limitator_group), pointer, private      :: advec_limitator => null()
-
-
-contains
-
-! ===== Public procedure =====
-
-! ================================================================================ !
-! =============     To deal with remeshing setup and genericity     ============= !
-! ================================================================================ !
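The module above selects its direction-specific kernels at run time through procedure pointers bound by the AC_setup_along* routines. The following is a minimal, self-contained sketch of that pattern (an abstract interface, one concrete implementation, a setup routine that binds a module-level pointer); all demo_* names are illustrative and not part of the library.

module demo_setup
    implicit none
    abstract interface
        subroutine remesh_proto(n, buffer)
            integer, intent(in)                 :: n
            real, dimension(:), intent(inout)   :: buffer
        end subroutine remesh_proto
    end interface
    ! Module-level pointer, as for remesh_in_buffer_lambda_pt above.
    procedure(remesh_proto), pointer :: remesh_pt => null()
contains
    subroutine demo_remesh_alongX(n, buffer)
        integer, intent(in)                 :: n
        real, dimension(:), intent(inout)   :: buffer
        buffer(1:n) = buffer(1:n) + 1.0     ! stand-in for X-specific remeshing
    end subroutine demo_remesh_alongX
    subroutine demo_setup_alongX()
        remesh_pt => demo_remesh_alongX     ! mirrors what AC_setup_alongX does
    end subroutine demo_setup_alongX
end module demo_setup

program demo
    use demo_setup
    implicit none
    real :: buf(4) = 0.0
    call demo_setup_alongX()    ! bind the pointers once per direction
    call remesh_pt(4, buf)      ! generic code then calls through the pointer
    print *, buf
end program demo

The design choice, as in the routines below, is that the direction-independent driver never references advecX/advecY/advecZ directly: it only calls through the pointers set up once per splitting step.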
-
-!> Init remesh_line_pt for the right remeshing formula
-subroutine AC_setup_init()
-
-    use advec_remeshing_lambda
-    use advec_remeshing_Mprime
-
-    call AC_remesh_init_lambda()
-    call AC_remesh_init_Mprime()
-
-end subroutine AC_setup_init
-
-!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along X
-subroutine AC_setup_alongX()
-    use advecX
-
-    remesh_in_buffer_lambda_pt      => advecX_remesh_in_buffer_lambda
-    remesh_in_buffer_limit_lambda_pt=> advecX_remesh_in_buffer_limit_lambda
-    remesh_in_buffer_Mprime_pt      => advecX_remesh_in_buffer_Mprime
-
-    remesh_buffer_to_scalar_pt      => advecX_remesh_buffer_to_scalar
-
-    advec_limitator                 => advecX_limitator_group
-
-end subroutine AC_setup_alongX
-
-!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along Y
-subroutine AC_setup_alongY()
-    use advecY
-
-    remesh_in_buffer_lambda_pt      => advecY_remesh_in_buffer_lambda
-    remesh_in_buffer_limit_lambda_pt=> advecY_remesh_in_buffer_limit_lambda
-    remesh_in_buffer_Mprime_pt      => advecY_remesh_in_buffer_Mprime
-    remesh_buffer_to_scalar_pt      => advecY_remesh_buffer_to_scalar
-
-    advec_limitator                 => advecY_limitator_group
-
-end subroutine AC_setup_alongY
-
-!> Setup remesh_in_buffer and remesh_in_buffer_to_scalar for remeshing along Z
-subroutine AC_setup_alongZ()
-    use advecZ
-
-    remesh_in_buffer_lambda_pt      => advecZ_remesh_in_buffer_lambda
-    remesh_in_buffer_limit_lambda_pt=> advecZ_remesh_in_buffer_limit_lambda
-    remesh_in_buffer_Mprime_pt      => advecZ_remesh_in_buffer_Mprime
-    remesh_buffer_to_scalar_pt      => advecZ_remesh_buffer_to_scalar
-
-    advec_limitator                 => advecZ_limitator_group
-
-end subroutine AC_setup_alongZ
-
-
-! ==============================================================================================
-! ====================     Remesh all the particles of a group of lines    ====================
-! ==============================================================================================
-
-
-!> remeshing with an order 2 or 4 lambda method, corrected to allow large CFL numbers - group version
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = dimensionless particle positions
-!!    @param[in]        p_V         = particle velocities (needed for tag and type)
-!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        dt          = time step (needed for tag and type)
-subroutine AC_remesh_lambda_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use cart_topology           ! Description of mesh and of mpi topology
-    use advec_correction        ! To compute type and tag
-
-    ! Input/Output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2), intent(in)           :: gs
-    integer, intent(in)                         :: j, k
-    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
-    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocities
-    real(WP), dimension(:,:,:), intent(inout)   :: scal
-    real(WP), intent(in)                        :: dt
-    ! Other local variables
-    ! To type and tag particles
-    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type  ! is the particle block a center block or a left one ?
-    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag   ! indices of tagged particles
-    ! Others
-    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
-    integer, dimension(2)                   :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), allocatable      :: send_rank    ! ranks of the processes to which I send information
-    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
-    integer, dimension(:,:), allocatable    :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
-                                                            ! current process will send data during remeshing and, for each of these lines, the
-                                                            ! range of mesh points from where it requires the velocity values.
-    ! Variables used to manage mpi communications
-    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
-
-    ! ===== Pre-Remeshing: Determine block types and tag particles =====
-    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-
-    ! ===== Compute range of remeshing data =====
-    call AC_remesh_range(bl_type, p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
-
-    ! ===== Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) =====
-    ! -- Allocation --
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
-    allocate(send_rank(send_gap_abs(1):send_gap_abs(2)))
-    ! -- Determine which processes communicate together --
-    call AC_remesh_determine_communication(direction, gs, ind_group, send_group_min, send_group_max, &
-        & rece_gap, send_gap, send_gap_abs, send_rank, cartography)
-
-    ! ===== Proceed to remeshing via a local buffer =====
-    call AC_remesh_via_buffer_lambda(direction, ind_group, gs, p_pos_adim, j, k,&
-        & scal, send_group_min, send_group_max, send_gap_abs, send_rank,        &
-        & rece_gap, cartography, bl_type, bl_tag)
-
-    ! -- Free all communication buffers and data --
-    deallocate(cartography)
-    deallocate(send_rank)
-
-end subroutine AC_remesh_lambda_group
-
-
-!> remeshing with an order 2 limited lambda method, corrected to allow large CFL numbers - group version
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = dimensionless particle positions
-!!    @param[in]        p_V         = particle velocities (needed for tag and type)
-!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        dt          = time step (needed for tag and type)
-subroutine AC_remesh_limit_lambda_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use cart_topology           ! Description of mesh and of mpi topology
-    use advec_correction        ! To compute type and tag
-
-    ! Input/Output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2), intent(in)           :: gs
-    integer, intent(in)                         :: j, k
-    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
-    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocities
-    real(WP), dimension(:,:,:), intent(inout)   :: scal
-    real(WP), intent(in)                        :: dt
-    ! Other local variables
-    ! To type and tag particles
-    logical, dimension(bl_nb(direction)+1,gs(1),gs(2))  :: bl_type  ! is the particle block a center block or a left one ?
-    logical, dimension(bl_nb(direction),gs(1),gs(2))    :: bl_tag   ! indices of tagged particles
-    real(WP), dimension(N_proc(direction)+1,gs(1),gs(2)):: limit    ! limitator function (divided by 8.)
-    ! Others
-    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
-    integer, dimension(2)                   :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), allocatable      :: send_rank    ! ranks of the processes to which I send information
-    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
-    integer, dimension(:,:), allocatable    :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
-                                                            ! current process will send data during remeshing and, for each of these lines, the
-                                                            ! range of mesh points from where it requires the velocity values.
-    ! Variables used to manage mpi communications
-    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
-
-    ! ===== Pre-Remeshing I: Determine block types and tag particles =====
-    call AC_type_and_block_group(dt, direction, gs, ind_group, p_V, bl_type, bl_tag)
-
-    ! ===== Compute range of remeshing data =====
-    call AC_remesh_range(bl_type, p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
-
-    ! ===== Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) =====
-    ! -- Allocation --
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
-    allocate(send_rank(send_gap_abs(1):send_gap_abs(2)))
-    ! -- Determine which processes communicate together --
-    call AC_remesh_determine_communication(direction, gs, ind_group, send_group_min, send_group_max, &
-        & rece_gap, send_gap, send_gap_abs, send_rank, cartography)
-
-    ! ===== Pre-Remeshing II: Compute the limitator function =====
-    ! Actually, this subroutine computes [limitator/8], as this fraction
-    ! always appears in the remeshing polynomials.
-    call advec_limitator(gs, ind_group, j, k, p_pos_adim, scal, limit)
-
-    ! ===== Proceed to remeshing via a local buffer =====
-    call AC_remesh_via_buffer_limit_lambda(direction, ind_group, gs, p_pos_adim,&
-        & j, k, scal, send_group_min, send_group_max, send_gap_abs, send_rank,  &
-        & rece_gap, cartography, bl_type, bl_tag, limit)
-
-    ! -- Free all communication buffers and data --
-    deallocate(cartography)
-    deallocate(send_rank)
-
-end subroutine AC_remesh_limit_lambda_group
-
-
-!> remeshing with a M'6 or M'8 remeshing formula - group version
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        p_pos_adim  = dimensionless particle positions
-!!    @param[in]        p_V         = particle velocities (needed for tag and type)
-!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        dt          = time step (needed for tag and type)
-subroutine AC_remesh_Mprime_group(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use cart_topology           ! Description of mesh and of mpi topology
-
-    ! Input/Output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2), intent(in)           :: gs
-    integer, intent(in)                         :: j, k
-    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
-    real(WP), dimension(:,:,:), intent(in)      :: p_V          ! particle velocities
-    real(WP), dimension(:,:,:), intent(inout)   :: scal
-    real(WP), intent(in)                        :: dt
-    ! Other local variables
-    integer, dimension(gs(1),gs(2),2)       :: send_gap     ! distance between me and the processes to which I send information
-    integer, dimension(2)                   :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), allocatable      :: send_rank    ! ranks of the processes to which I send information
-    integer, dimension(2 , 2)               :: rece_gap     ! distance between me and the processes which send me information
-    integer, dimension(:,:), allocatable    :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
-                                                            ! current process will send data during remeshing and, for each of these lines, the
-                                                            ! range of mesh points from where it requires the velocity values.
-    ! Variables used to manage mpi communications
-    integer                                 :: max_size     ! maximal size of cartography(:,proc_gap)
-
-    ! ===== Compute range of remeshing data =====
-    call AC_remesh_range_notype(p_pos_adim, direction, send_group_min, send_group_max, send_gap, send_gap_abs)
-
-    ! ===== Determine the communications needed: who will communicate with whom? (i.e. compute senders and receivers) =====
-    ! -- Allocation --
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(cartography(max_size,send_gap_abs(1):send_gap_abs(2)))
-    allocate(send_rank(send_gap_abs(1):send_gap_abs(2)))
-    ! -- Determine which processes communicate together --
-    call AC_remesh_determine_communication_com(direction, gs, ind_group, &
-        & rece_gap, send_gap, send_gap_abs, send_rank, cartography)
-
-    ! ===== Proceed to remeshing via a local buffer =====
-    call AC_remesh_via_buffer_Mprime(direction, ind_group, gs, p_pos_adim, &
-        & j, k, scal, send_group_min, send_group_max, send_gap_abs,        &
-        & send_rank, rece_gap, cartography)
-
-    ! -- Free all communication buffers and data --
-    deallocate(cartography)
-    deallocate(send_rank)
-
-end subroutine AC_remesh_Mprime_group
-
-
-! ===================================================================================================
-! ===== Tools to remesh particles: variant of remeshing via buffer for each family of remeshing =====
-! ===================================================================================================
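All three group drivers above size their cartography with max_size = 2 + gs(2)*(2+3*gs(1)) and index it directly by the (possibly negative) process gap. The sketch below is a self-contained reading of that allocation, not library code: our interpretation is that each column carries 2 header entries, then per group column ind2 a count plus up to 3*gs(1) interval data; the gs and send_gap_abs values are made up for the demo.

program demo_carto_alloc
    implicit none
    integer, parameter :: gs(2) = (/ 4, 3 /)             ! group size (illustrative)
    integer, parameter :: send_gap_abs(2) = (/ -1, 2 /)  ! min/max process gap (illustrative)
    integer :: max_size
    integer, allocatable :: cartography(:,:), send_rank(:)

    max_size = 2 + gs(2)*(2+3*gs(1))
    ! One cartography column and one rank per process gap I may send to,
    ! indexed directly by the gap itself, as in the drivers above.
    allocate(cartography(max_size, send_gap_abs(1):send_gap_abs(2)))
    allocate(send_rank(send_gap_abs(1):send_gap_abs(2)))
    cartography = 0
    print *, 'columns:', send_gap_abs(2)-send_gap_abs(1)+1, ' rows:', max_size
    deallocate(cartography, send_rank)
end program demo_carto_alloc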
-
-
-!> Use the input information to update the scalar field by creating particle
-!! weights (from the scalar values), setting the scalar to 0 and redistributing
-!! the particles - variant for corrected lambda remeshing formulae.
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
-!!    @param[in]        p_pos_adim  = dimensionless particle positions
-!!    @param[in]        bl_type     = table of block types (center or left)
-!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th block
-!!                                    and the beginning of the following one is tagged)
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        send_min    = minimal index of the mesh points involved in the remeshing of particles
-!!    @param[in]        send_max    = maximal index of the mesh points involved in the remeshing of particles
-!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-!!    @param[in]        send_rank   = MPI ranks of the processes to which I will send messages.
-!!    @param[in]        rece_gap    = coordinate range of the processes which will send me information during the remeshing.
-!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the block for which the
-!!                                    current process requires data from proc_gap and, for each of these lines, the range
-!!                                    of mesh points from where it requires the velocity values.
-!! @details
-!!    This procedure manages all the needed communications. To minimize communications,
-!! particles are remeshed inside a local buffer which is then sent to the
-!! process that contains the right sub-domain, depending on the particle
-!! position. There is no need for communication in order to remesh inside the
-!! buffer. To avoid a recopy when creating the particle weights (which will be weight
-!! = scalar), the scalar is directly redistributed inside the local buffer.
-!! This provides:
-!!    a - Remesh the particles: redistribute the scalar field inside a local buffer and
-!!        set scalar = 0.
-!!    b - Send the local buffer to its target process and update the scalar field,
-!!        i.e. scalar = scalar + received buffer.
-!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part
-!! "b", except for the communications. The current subroutine manages all the
-!! communications (and everything else needed to ensure correctness).
-subroutine AC_remesh_via_buffer_lambda(direction, ind_group, gs, p_pos_adim, &
-        & j, k, scal, send_min, send_max, send_gap_abs, send_rank, rece_gap,&
-        & cartography, bl_type, bl_tag)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use advec_abstract_proc     ! contains some useful procedure pointers.
-    use advecX                  ! procedures specific to advection along X
-    use advecY                  ! procedures specific to advection along Y
-    use advecZ                  ! procedures specific to advection along Z
-    use cart_topology           ! Description of mesh and of mpi topology
-    use mpi
-
-    ! Input/Output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2), intent(in)           :: gs
-    integer, intent(in)                         :: j, k
-    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
-    logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a center block or a left one ?
-    logical, dimension(:,:,:), intent(in)       :: bl_tag       ! indices of tagged particles
-    real(WP), dimension(:,:,:), intent(inout)   :: scal
-    integer, dimension(:,:), intent(in)         :: send_min     ! minimal index of the mesh points involved in the remeshing
-    integer, dimension(:,:), intent(in)         :: send_max     ! maximal index of the mesh points involved in the remeshing
-    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), intent(in)           :: send_rank    ! ranks of the processes to which I send information
-    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes which send me information
-    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
-                                                                ! current process will send data during remeshing and, for each of these lines, the
-                                                                ! range of mesh points from where it requires the velocity values.
-    ! Other local variables
-    ! Others
-    integer, dimension(rece_gap(1,1):rece_gap(1,2)) :: rece_rank ! ranks of the processes which send me information
-    integer, dimension(:,:), allocatable        :: rece_carto   ! same as above but for what I receive
-    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
-    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
-    ! Variables used to remesh particles into a buffer
-    real(WP),dimension(:),allocatable,target    :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain;
-                                                                ! sorted by receivers and not by coordinates.
-    integer, dimension(:), allocatable          :: pos_in_buffer! starting position of each sub-buffer inside send_buffer
-    ! Variables used to manage mpi communications
-    integer, dimension(:), allocatable          :: s_request_ran! mpi communication requests (handles) of nonblocking sends
-    integer, dimension(:), allocatable          :: r_request_ran! mpi communication requests (handles) of nonblocking receives
-    integer, dimension(:,:), allocatable        :: r_status     ! mpi communication statuses of nonblocking receives
-    integer, dimension(:,:), allocatable        :: s_status     ! mpi communication statuses of nonblocking sends
-    integer                                     :: ierr         ! mpi error code
-    integer                                     :: nb_r, nb_s   ! number of receptions/sends
-
-
-    ! ===== Allocation =====
-    ! -- allocate requests for the (non-blocking) reception of cartographies --
-    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
-    allocate(r_request_ran(1:nb_r))
-    r_request_ran = MPI_REQUEST_NULL
-    ! -- allocate the cartography of what I receive --
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
-    ! -- allocate requests for the (non-blocking) sends of cartographies --
-    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
-    allocate(s_request_ran(1:nb_s))
-    ! -- To manage the buffer --
-    ! Positions of the sub-buffers associated with the different mpi processes
-    allocate(pos_in_buffer(0:nb_s))
-
-    ! ===== Init the remeshing process: pre-process =====
-    ! Build a cartography of the mesh points where particles will be remeshed,
-    ! create a 1D buffer where the remeshing will be performed and create the
-    ! tools to manage it.
-    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
-        & send_gap_abs, send_rank, rece_gap, rece_rank, nb_s, cartography, &
-        & rece_carto, pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
-
-
-    ! ===== Initialize the general buffer =====
-    allocate(send_buffer(pos_in_buffer(nb_s) &
-                & + cartography(1,nb_s)-1))
-    send_buffer = 0.0
-
-    ! ===== Remeshing into the buffer by using the procedure pointer =====
-    call remesh_in_buffer_lambda_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, bl_type, bl_tag, send_min, &
-            & send_max, scal, send_buffer, pos_in_buffer)
-    ! Observe that now:
-    ! => pos_in_buffer(i-1) = first (1D) index of the sub-array of send_buffer
-    !    associated with the i-th mpi process to which I will send remeshed particles.
-
-    ! ===== Wait for the reception of all cartographies =====
-    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
-    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
-    deallocate(r_request_ran)
-    deallocate(r_status)
-    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
-    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
-    !deallocate(ind_array)
-
-    ! ===== Finish the remeshing process =====
-    ! Send buffers, receive some other buffers and update the scalar field.
-    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, send_rank, rece_gap, &
-      & rece_rank, nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
-
-
-    ! ===== Free memory and communication buffers ====
-    ! -- Deallocate all fields --
-    deallocate(rece_carto)
-    ! -- Check that the Isend are done --
-    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
-    deallocate(s_status)
-    ! -- Free all communication buffers and data --
-    deallocate(send_buffer)
-    deallocate(pos_in_buffer)
-    deallocate(s_request_ran)
-
-end subroutine AC_remesh_via_buffer_lambda
-
-
-!> Use the input information to update the scalar field by creating particle
-!! weights (from the scalar values), setting the scalar to 0 and redistributing
-!! the particles - variant for corrected and limited lambda remeshing formulae.
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
-!!    @param[in]        p_pos_adim  = dimensionless particle positions
-!!    @param[in]        bl_type     = table of block types (center or left)
-!!    @param[in]        bl_tag      = information about tagged particles (bl_tag(ind_bl)=1 if the end of the ind_bl-th block
-!!                                    and the beginning of the following one is tagged)
-!!    @param[in]        limit       = limitator function
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        send_min    = minimal index of the mesh points involved in the remeshing of particles
-!!    @param[in]        send_max    = maximal index of the mesh points involved in the remeshing of particles
-!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-!!    @param[in]        send_rank   = MPI ranks of the processes to which I will send messages.
-!!    @param[in]        rece_gap    = coordinate range of the processes which will send me information during the remeshing.
-!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the block for which the
-!!                                    current process requires data from proc_gap and, for each of these lines, the range
-!!                                    of mesh points from where it requires the velocity values.
-!! @details
-!!    This procedure manages all the needed communications. To minimize communications,
-!! particles are remeshed inside a local buffer which is then sent to the
-!! process that contains the right sub-domain, depending on the particle
-!! position. There is no need for communication in order to remesh inside the
-!! buffer. To avoid a recopy when creating the particle weights (which will be weight
-!! = scalar), the scalar is directly redistributed inside the local buffer.
-!! This provides:
-!!    a - Remesh the particles: redistribute the scalar field inside a local buffer and
-!!        set scalar = 0.
-!!    b - Send the local buffer to its target process and update the scalar field,
-!!        i.e. scalar = scalar + received buffer.
-!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part
-!! "b", except for the communications. The current subroutine manages all the
-!! communications (and everything else needed to ensure correctness).
-subroutine AC_remesh_via_buffer_limit_lambda(direction, ind_group, gs, p_pos_adim, &
-        & j, k, scal, send_min, send_max, send_gap_abs, send_rank, rece_gap,&
-        & cartography, bl_type, bl_tag, limit)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use advec_abstract_proc     ! contains some useful procedure pointers.
-    use advecX                  ! procedures specific to advection along X
-    use advecY                  ! procedures specific to advection along Y
-    use advecZ                  ! procedures specific to advection along Z
-    use cart_topology           ! Description of mesh and of mpi topology
-    use mpi
-
-    ! Input/Output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2), intent(in)           :: gs
-    integer, intent(in)                         :: j, k
-    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
-    logical, dimension(:,:,:), intent(in)       :: bl_type      ! is the particle block a center block or a left one ?
-    logical, dimension(:,:,:), intent(in)       :: bl_tag       ! indices of tagged particles
-    real(WP), dimension(:,:,:), intent(in)      :: limit        ! limitator function (divided by 8.)
-    real(WP), dimension(:,:,:), intent(inout)   :: scal
-    integer, dimension(:,:), intent(in)         :: send_min     ! minimal index of the mesh points involved in the remeshing
-    integer, dimension(:,:), intent(in)         :: send_max     ! maximal index of the mesh points involved in the remeshing
-    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), intent(in)           :: send_rank    ! ranks of the processes to which I send information
-    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes which send me information
-    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
-                                                                ! current process will send data during remeshing and, for each of these lines, the
-                                                                ! range of mesh points from where it requires the velocity values.
-    ! Other local variables
-    ! Others
-    integer, dimension(rece_gap(1,1):rece_gap(1,2)) :: rece_rank ! ranks of the processes which send me information
-    integer, dimension(:,:), allocatable        :: rece_carto   ! same as above but for what I receive
-    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
-    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
-    ! Variables used to remesh particles into a buffer
-    real(WP),dimension(:),allocatable,target    :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain;
-                                                                ! sorted by receivers and not by coordinates.
-    integer, dimension(:), allocatable          :: pos_in_buffer! starting position of each sub-buffer inside send_buffer
-    ! Variables used to manage mpi communications
-    integer, dimension(:), allocatable          :: s_request_ran! mpi communication requests (handles) of nonblocking sends
-    integer, dimension(:), allocatable          :: r_request_ran! mpi communication requests (handles) of nonblocking receives
-    integer, dimension(:,:), allocatable        :: r_status     ! mpi communication statuses of nonblocking receives
-    integer, dimension(:,:), allocatable        :: s_status     ! mpi communication statuses of nonblocking sends
-    integer                                     :: ierr         ! mpi error code
-    integer                                     :: nb_r, nb_s   ! number of receptions/sends
-
-
-    ! ===== Allocation =====
-    ! -- allocate requests for the (non-blocking) reception of cartographies --
-    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
-    allocate(r_request_ran(1:nb_r))
-    r_request_ran = MPI_REQUEST_NULL
-    ! -- allocate the cartography of what I receive --
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
-    ! -- allocate requests for the (non-blocking) sends of cartographies --
-    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
-    allocate(s_request_ran(1:nb_s))
-    ! -- To manage the buffer --
-    ! Positions of the sub-buffers associated with the different mpi processes
-    allocate(pos_in_buffer(0:nb_s))
-
-    ! ===== Init the remeshing process: pre-process =====
-    ! Build a cartography of the mesh points where particles will be remeshed,
-    ! create a 1D buffer where the remeshing will be performed and create the
-    ! tools to manage it.
-    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
-        & send_gap_abs, send_rank, rece_gap, rece_rank, nb_s, cartography, &
-        & rece_carto, pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
-
-
-    ! ===== Initialize the general buffer =====
-    allocate(send_buffer(pos_in_buffer(nb_s) &
-                & + cartography(1,nb_s)-1))
-    send_buffer = 0.0
-
-    ! ===== Remeshing into the buffer by using the procedure pointer =====
-    call remesh_in_buffer_limit_lambda_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, bl_type, bl_tag, limit, &
-            & send_min, send_max, scal, send_buffer, pos_in_buffer)
-    ! Observe that now:
-    ! => pos_in_buffer(i-1) = first (1D) index of the sub-array of send_buffer
-    !    associated with the i-th mpi process to which I will send remeshed particles.
-
-    ! ===== Wait for the reception of all cartographies =====
-    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
-    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
-    deallocate(r_request_ran)
-    deallocate(r_status)
-    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
-    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
-    !deallocate(ind_array)
-
-    ! ===== Finish the remeshing process =====
-    ! Send buffers, receive some other buffers and update the scalar field.
-    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, send_rank, rece_gap, &
-      & rece_rank, nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
-
-
-    ! ===== Free memory and communication buffers ====
-    ! -- Deallocate all fields --
-    deallocate(rece_carto)
-    ! -- Check that the Isend are done --
-    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
-    deallocate(s_status)
-    ! -- Free all communication buffers and data --
-    deallocate(send_buffer)
-    deallocate(pos_in_buffer)
-    deallocate(s_request_ran)
-
-end subroutine AC_remesh_via_buffer_limit_lambda
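The two routines above pack all remeshed weights into one 1D send_buffer sorted by receiver, with pos_in_buffer giving the start of each receiver's slice, and the receiver then accumulates scalar = scalar + received buffer. Here is a small, serial and self-contained sketch of that layout; the sizes, counts and values are made up for the demo and the MPI exchange is elided.

program demo_pos_in_buffer
    implicit none
    integer, parameter :: nb_s = 2                  ! number of receivers (illustrative)
    integer, parameter :: counts(nb_s) = (/ 3, 2 /) ! weights packed per receiver
    real    :: send_buffer(5), scal(5)
    integer :: pos_in_buffer(0:nb_s)
    integer :: i

    ! Prefix sum: pos_in_buffer(i-1) is the first 1D index of receiver i's slice.
    pos_in_buffer(0) = 1
    do i = 1, nb_s
        pos_in_buffer(i) = pos_in_buffer(i-1) + counts(i)
    end do

    send_buffer = (/ 1., 2., 3., 4., 5. /)          ! packed remeshed weights
    scal = 0.
    ! On the receiving side each slice is added into the local scalar field
    ! (scalar = scalar + received buffer), as AC_remesh_finalize does after
    ! the actual MPI exchange.
    do i = 1, nb_s
        scal(pos_in_buffer(i-1):pos_in_buffer(i)-1) =       &
            & scal(pos_in_buffer(i-1):pos_in_buffer(i)-1) + &
            & send_buffer(pos_in_buffer(i-1):pos_in_buffer(i)-1)
    end do
    print *, scal
end program demo_pos_in_buffer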
-
-
-!> Use the input information to update the scalar field by creating particle
-!! weights (from the scalar values), setting the scalar to 0 and redistributing
-!! the particles - variant for M' remeshing formulae.
-!!    @param[in]        ind_group   = coordinate of the current group of lines
-!!    @param[in]        gs          = size of groups (along X direction)
-!!    @param[in]        j,k         = indices of the current line (x-coordinate and z-coordinate)
-!!    @param[in]        p_pos_adim  = dimensionless particle positions
-!!    @param[in,out]    scal        = scalar field to advect
-!!    @param[in]        send_min    = minimal index of the mesh points involved in the remeshing of particles
-!!    @param[in]        send_max    = maximal index of the mesh points involved in the remeshing of particles
-!!    @param[in]        send_gap_abs= send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-!!    @param[in]        send_rank   = MPI ranks of the processes to which I will send messages.
-!!    @param[in]        rece_gap    = coordinate range of the processes which will send me information during the remeshing.
-!!    @param[in,out]    cartography = cartography(:,proc_gap) contains the set of line indices in the block for which the
-!!                                    current process requires data from proc_gap and, for each of these lines, the range
-!!                                    of mesh points from where it requires the velocity values.
-!! @details
-!!    This procedure manages all the needed communications. To minimize communications,
-!! particles are remeshed inside a local buffer which is then sent to the
-!! process that contains the right sub-domain, depending on the particle
-!! position. There is no need for communication in order to remesh inside the
-!! buffer. To avoid a recopy when creating the particle weights (which will be weight
-!! = scalar), the scalar is directly redistributed inside the local buffer.
-!! This provides:
-!!    a - Remesh the particles: redistribute the scalar field inside a local buffer and
-!!        set scalar = 0.
-!!    b - Send the local buffer to its target process and update the scalar field,
-!!        i.e. scalar = scalar + received buffer.
-!! "remesh_in_buffer_pt" does part "a" and "remesh_buffer_to_scalar" part
-!! "b", except for the communications. The current subroutine manages all the
-!! communications (and everything else needed to ensure correctness).
-subroutine AC_remesh_via_buffer_Mprime(direction, ind_group, gs, p_pos_adim, &
-        & j, k, scal, send_min, send_max, send_gap_abs, send_rank, rece_gap, &
-        & cartography)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use advec_abstract_proc     ! contains some useful procedure pointers.
-    use advecX                  ! procedures specific to advection along X
-    use advecY                  ! procedures specific to advection along Y
-    use advecZ                  ! procedures specific to advection along Z
-    use cart_topology           ! Description of mesh and of mpi topology
-    use mpi
-
-    ! Input/Output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2), intent(in)           :: gs
-    integer, intent(in)                         :: j, k
-    real(WP), dimension(:,:,:), intent(in)      :: p_pos_adim   ! dimensionless particle positions
-    real(WP), dimension(:,:,:), intent(inout)   :: scal
-    integer, dimension(:,:), intent(in)         :: send_min     ! minimal index of the mesh points involved in the remeshing
-    integer, dimension(:,:), intent(in)         :: send_max     ! maximal index of the mesh points involved in the remeshing
-    integer, dimension(2), intent(in)           :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), intent(in)           :: send_rank    ! ranks of the processes to which I send information
-    integer, dimension(2, 2), intent(in)        :: rece_gap     ! distance between me and the processes which send me information
-    integer, dimension(:,:), intent(inout)      :: cartography  ! cartography(:,proc_gap) contains the set of line indices in the block to which the
-                                                                ! current process will send data during remeshing and, for each of these lines, the
-                                                                ! range of mesh points from where it requires the velocity values.
-    ! Other local variables
-    ! Others
-    integer, dimension(rece_gap(1,1):rece_gap(1,2)) :: rece_rank ! ranks of the processes which send me information
-    integer, dimension(:,:), allocatable        :: rece_carto   ! same as above but for what I receive
-    integer                                     :: min_size     ! minimal size of cartography(:,proc_gap)
-    integer                                     :: max_size     ! maximal size of cartography(:,proc_gap)
-    ! Variables used to remesh particles into a buffer
-    real(WP),dimension(:),allocatable,target    :: send_buffer  ! buffer used to remesh the scalar before sending it to the right subdomain;
-                                                                ! sorted by receivers and not by coordinates.
-    integer, dimension(:), allocatable          :: pos_in_buffer! starting position of each sub-buffer inside send_buffer
-    ! Variables used to manage mpi communications
-    integer, dimension(:), allocatable          :: s_request_ran! mpi communication requests (handles) of nonblocking sends
-    integer, dimension(:), allocatable          :: r_request_ran! mpi communication requests (handles) of nonblocking receives
-    integer, dimension(:,:), allocatable        :: r_status     ! mpi communication statuses of nonblocking receives
-    integer, dimension(:,:), allocatable        :: s_status     ! mpi communication statuses of nonblocking sends
-    integer                                     :: ierr         ! mpi error code
-    integer                                     :: nb_r, nb_s   ! number of receptions/sends
-
-
-    ! ===== Allocation =====
-    ! -- allocate requests for the (non-blocking) reception of cartographies --
-    nb_r = rece_gap(1,2) - rece_gap(1,1) + 1
-    allocate(r_request_ran(1:nb_r))
-    r_request_ran = MPI_REQUEST_NULL
-    ! -- allocate the cartography of what I receive --
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(rece_carto(max_size,rece_gap(1,1):rece_gap(1,2)))
-    ! -- allocate requests for the (non-blocking) sends of cartographies --
-    nb_s = send_gap_abs(2) - send_gap_abs(1) + 1
-    allocate(s_request_ran(1:nb_s))
-    ! -- To manage the buffer --
-    ! Positions of the sub-buffers associated with the different mpi processes
-    allocate(pos_in_buffer(0:nb_s))
-
-    ! ===== Init the remeshing process: pre-process =====
-    ! Build a cartography of the mesh points where particles will be remeshed,
-    ! create a 1D buffer where the remeshing will be performed and create the
-    ! tools to manage it.
-    call AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
-        & send_gap_abs, send_rank, rece_gap, rece_rank, nb_s, cartography, &
-        & rece_carto, pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
-
-
-    ! ===== Initialize the general buffer =====
-    allocate(send_buffer(pos_in_buffer(nb_s) &
-                & + cartography(1,nb_s)-1))
-    send_buffer = 0.0
-
-    ! ===== Remeshing into the buffer by using the procedure pointer =====
-    call remesh_in_buffer_Mprime_pt(gs, j, k, send_gap_abs(1)-1, p_pos_adim, send_min, &
-            & send_max, scal, send_buffer, pos_in_buffer)
-    ! Observe that now:
-    ! => pos_in_buffer(i-1) = first (1D) index of the sub-array of send_buffer
-    !    associated with the i-th mpi process to which I will send remeshed particles.
-
-    ! ===== Wait for the reception of all cartographies =====
-    allocate(r_status(MPI_STATUS_SIZE,1:nb_r))
-    call mpi_waitall(nb_r,r_request_ran, r_status, ierr)
-    deallocate(r_request_ran)
-    deallocate(r_status)
-    !allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    !allocate(ind_array(send_gap_abs(1):send_gap_abs(2)))
-    !call mpi_testsome(size(s_request_ran),s_request_ran, ind_1Dtable, ind_array, s_status, ierr)
-    !deallocate(ind_array)
-
-    ! ===== Finish the remeshing process =====
-    ! Send buffers, receive some other buffers and update the scalar field.
-    call AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, send_rank, rece_gap, &
-      & rece_rank, nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
-
-
-    ! ===== Free memory and communication buffers ====
-    ! -- Deallocate all fields --
-    deallocate(rece_carto)
-    ! -- Check that the Isend are done --
-    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    call mpi_waitall(nb_s, s_request_ran, s_status, ierr)
-    deallocate(s_status)
-    ! -- Free all communication buffers and data --
-    deallocate(send_buffer)
-    deallocate(pos_in_buffer)
-    deallocate(s_request_ran)
-
-end subroutine AC_remesh_via_buffer_Mprime
-
-
-! ==================================================================================
-! ====================      Other tools to remesh particles        ====================
-! ==================================================================================
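Before the range-computation tools that follow, here is a self-contained sketch of their core arithmetic for a single line: centered blocks round the particle position with nint(), left blocks use floor(), the stencil half-widths widen the range, and an integer floor division maps mesh indices to a process gap. The stencil and N_proc values are made up for the demo.

program demo_remesh_range
    implicit none
    integer, parameter :: remesh_stencil(2) = (/ 1, 2 /)  ! stencil half-widths (illustrative)
    integer, parameter :: N_proc = 16       ! mesh points per process (illustrative)
    real    :: p_first = 3.7, p_last = 18.2 ! first/last particle of one line
    logical :: first_is_center = .true., last_is_center = .false.
    integer :: send_min, send_max, gap_min, gap_max

    if (first_is_center) then
        send_min = nint(p_first) - remesh_stencil(1)   ! centered block
    else
        send_min = floor(p_first) - remesh_stencil(1)  ! left block
    end if
    if (last_is_center) then
        send_max = nint(p_last) + remesh_stencil(2)
    else
        send_max = floor(p_last) + remesh_stencil(2)
    end if
    ! Mesh index -> gap between my MPI coordinate and the owner's coordinate.
    gap_min = floor(real(send_min-1)/N_proc)
    gap_max = floor(real(send_max-1)/N_proc)
    print *, send_min, send_max, gap_min, gap_max     ! here: 3, 20, 0, 1
end program demo_remesh_range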
-
-!> Determine where the particles of each line will be remeshed
-!!    @param[in]    bl_type      = equal 0 (resp. 1) if the block is left (resp. centered)
-!!    @param[in]    p_pos_adim   = dimensionless particle positions
-!!    @param[in]    direction    = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!!    @param[out]   send_min     = minimal index of the mesh points involved in the remeshing (of the particles in my local subdomain)
-!!    @param[out]   send_max     = maximal index of the mesh points involved in the remeshing (of the particles in my local subdomain)
-!!    @param[out]   send_gap     = distance between me and the processes to which I send information (for each line of the group)
-!!    @param[out]   send_gap_abs = send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-subroutine AC_remesh_range(bl_type, p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use cart_topology           ! Description of mesh and of mpi topology
-
-    ! Input/Output
-    logical, dimension(:,:,:), intent(in)   :: bl_type      ! is the particle block a center block or a left one ?
-    real(WP), dimension(:,:,:), intent(in)  :: p_pos_adim   ! dimensionless particle positions
-    integer, intent(in)                     :: direction
-    integer, dimension(:,:), intent(out)    :: send_min     ! minimal index of the mesh points involved in the remeshing
-    integer, dimension(:,:), intent(out)    :: send_max     ! maximal index of the mesh points involved in the remeshing
-    integer, dimension(:,:,:), intent(out)  :: send_gap     ! distance between me and the processes to which I send information
-    integer, dimension(2), intent(out)      :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-
-    ! -- Compute ranges --
-    where (bl_type(1,:,:))
-        ! First particle is a centered one
-        send_min = nint(p_pos_adim(1,:,:))-remesh_stencil(1)
-    elsewhere
-        ! First particle is a left one
-        send_min = floor(p_pos_adim(1,:,:))-remesh_stencil(1)
-    end where
-    where (bl_type(bl_nb(direction)+1,:,:))
-        ! Last particle is a centered one
-        send_max = nint(p_pos_adim(N_proc(direction),:,:))+remesh_stencil(2)
-    elsewhere
-        ! Last particle is a left one
-        send_max = floor(p_pos_adim(N_proc(direction),:,:))+remesh_stencil(2)
-    end where
-
-    ! -- What do I have to communicate? --
-    send_gap(:,:,1) = floor(real(send_min-1)/N_proc(direction))
-    send_gap(:,:,2) = floor(real(send_max-1)/N_proc(direction))
-    send_gap_abs(1) = minval(send_gap(:,:,1))
-    send_gap_abs(2) = maxval(send_gap(:,:,2))
-
-end subroutine AC_remesh_range
-
-
-!> Determine where the particles of each line will be remeshed - variant for
-!! remeshing without type/tag
-!!    @param[in]    p_pos_adim   = dimensionless particle positions
-!!    @param[in]    direction    = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!!    @param[out]   send_min     = minimal index of the mesh points involved in the remeshing (of the particles in my local subdomain)
-!!    @param[out]   send_max     = maximal index of the mesh points involved in the remeshing (of the particles in my local subdomain)
-!!    @param[out]   send_gap     = distance between me and the processes to which I send information (for each line of the group)
-!!    @param[out]   send_gap_abs = send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-subroutine AC_remesh_range_notype(p_pos_adim, direction, send_min, send_max, send_gap, send_gap_abs)
-
-    use advec_variables         ! contains info about solver parameters and others.
-    use cart_topology           ! Description of mesh and of mpi topology
-
-    ! Input/Output
-    real(WP), dimension(:,:,:), intent(in)  :: p_pos_adim   ! dimensionless particle positions
-    integer, intent(in)                     :: direction
-    integer, dimension(:,:), intent(out)    :: send_min     ! minimal index of the mesh points involved in the remeshing
-    integer, dimension(:,:), intent(out)    :: send_max     ! maximal index of the mesh points involved in the remeshing
-    integer, dimension(:,:,:), intent(out)  :: send_gap     ! distance between me and the processes to which I send information
-    integer, dimension(2), intent(out)      :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-
-    ! -- Compute ranges --
-    send_min = floor(p_pos_adim(1,:,:))-remesh_stencil(1)
-    send_max = floor(p_pos_adim(N_proc(direction),:,:))+remesh_stencil(2)
-
-    ! -- What do I have to communicate? --
-    send_gap(:,:,1) = floor(real(send_min-1)/N_proc(direction))
-    send_gap(:,:,2) = floor(real(send_max-1)/N_proc(direction))
-    send_gap_abs(1) = minval(send_gap(:,:,1))
-    send_gap_abs(2) = maxval(send_gap(:,:,2))
-
-end subroutine AC_remesh_range_notype
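The communication-planning routines that follow store, per target process, the concerned group lines as a run-length list of (first,last) intervals inside a cartography column. Below is a self-contained sketch of that sparse interval encoding for one group column, mirroring the begin_interval loop of the routines below; the mask values are made up for the demo.

program demo_cartography
    implicit none
    integer, parameter :: gs1 = 6, gs2 = 1
    logical :: concerned(gs1) = (/ .true., .true., .false., .true., .true., .true. /)
    integer :: cartography(2 + gs2*(2+3*gs1))
    integer :: ind1, ind2
    logical :: begin_interval

    cartography(1) = 0          ! number of values to receive (unused in this demo)
    cartography(2) = 2 + gs2    ! current number of stored elements (the header)
    ind2 = 1
    cartography(2+ind2) = 0     ! 2 x number of intervals in this column
    begin_interval = .true.
    do ind1 = 1, gs1
        if (concerned(ind1)) then
            if (begin_interval) then
                ! Open a new interval: store its first index, then a running
                ! copy of the last index which is overwritten while it grows.
                cartography(2+ind2) = cartography(2+ind2) + 2
                cartography(cartography(2)+1) = ind1
                cartography(2) = cartography(2) + 2
                cartography(cartography(2)) = ind1
                begin_interval = .false.
            else
                cartography(cartography(2)) = ind1   ! extend the open interval
            end if
        else
            begin_interval = .true.
        end if
    end do
    ! Expect two intervals, (1,2) and (4,6), i.e. cartography(3) = 4.
    print *, cartography(3), cartography(4:cartography(2))
end program demo_cartography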
- integer, dimension(2, 2), intent(out) :: rece_gap - integer(kind=4), dimension(gs(1),gs(2),2),intent(in):: send_gap ! minimal and maximal processus which contains the sub-domains where my - ! particles will be remeshed for each line of the line group - integer, dimension(2), intent(in) :: send_gap_abs ! min and maximal processus which contains the sub-domains where my particles will be remeshed. - integer,dimension(send_gap_abs(1):send_gap_abs(2))& - &, intent(out) :: send_rank ! rank of processus to wich I will send data - integer, dimension(2+gs(2)*(2+3*gs(1)), & - & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography - - ! To manage communications and to localize sub-domain - integer(kind=4) :: proc_gap ! gap between a processus coordinate (along the current - ! direction) into the mpi-topology and my coordinate - integer :: rankP ! processus rank for shift (P= previous) - integer, dimension(2) :: tag_table ! mpi message tag (for communicate rece_gap(1) and rece_gap(2)) - integer, dimension(:,:),allocatable :: send_request ! mpi status of nonblocking send - integer :: ierr ! mpi error code - integer, dimension(MPI_STATUS_SIZE) :: statut ! mpi status - ! To determine which processus is the first/last to send data to another - integer, dimension(:,:), allocatable :: first, last ! Storage processus to which I will be the first (or the last) to send - ! remeshed particles - integer :: first_condition ! allowed range of value of proc_min and proc_max for being the first - integer :: last_condition ! allowed range of value of proc_min and proc_max for being the last - ! Other local variable - integer :: ind1, ind2 ! indice of the current line inside the group - integer :: min_size ! begin indice in first and last to stock indice along first dimension of the group line - integer :: gp_size ! group size - integer,dimension(2) :: rece_buffer ! buffer for reception of rece_max - logical :: begin_interval ! ware we in the start of an interval ? - - rece_gap(1,1) = 3*N(direction) - rece_gap(1,2) = -3*N(direction) - rece_gap(2,:) = 0 - gp_size = gs(1)*gs(2) - - allocate(send_request(send_gap_abs(1):send_gap_abs(2),3)) - send_request(:,3) = 0 - - ! ===== Compute if I am first or last and determine the carography ===== - min_size = 2 + gs(2) - ! Initialize first and last to determine if I am the the first or the last processes (considering the current direction) - ! to require information from this processus - allocate(first(2,send_gap_abs(1):send_gap_abs(2))) - first(2,:) = 0 ! number of lines for which I am the first - allocate(last(2,send_gap_abs(1):send_gap_abs(2))) - last(2,:) = 0 ! number of lines for which I am the last - ! Initialize cartography - cartography(1,:) = 0 ! number of velocity values to receive - cartography(2,:) = min_size ! number of element to send when sending cartography - ! And compute cartography, first and last ! - do proc_gap = send_gap_abs(1), send_gap_abs(2) - first(1,proc_gap) = -proc_gap - last(1,proc_gap) = -proc_gap - first_condition = 1-2*bl_bound_size + proc_gap*N_proc(direction)+1 - last_condition = -1+2*bl_bound_size + (proc_gap+1)*N_proc(direction) - call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr) - do ind2 = 1, gs(2) - cartography(2+ind2,proc_gap) = 0 ! 2 x number of interval of concern line into the column i2 - begin_interval = .true. - do ind1 = 1, gs(1) - ! Does proc_gap belongs to [send_gap(i1,i2,1);send_gap(i1,i2,2)]? - if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then - ! 
Compute if I am the first. - if (remesh_min(ind1,ind2)< first_condition) first(2,proc_gap) = first(2,proc_gap)+1 - ! Compute if I am the last. - if (remesh_max(ind1,ind2) > last_condition) last(2,proc_gap) = last(2,proc_gap)+1 - ! Update cartography // Needed even if target processus is myself as we us buffer - ! in all the case (scalar field cannot be used directly during the remeshing) - if (begin_interval) then - cartography(2+ind2,proc_gap) = cartography(2+ind2,proc_gap)+2 - cartography(cartography(2,proc_gap)+1,proc_gap) = ind1 - cartography(2,proc_gap) = cartography(2,proc_gap) + 2 - cartography(cartography(2,proc_gap),proc_gap) = ind1 - begin_interval = .false. - else - cartography(cartography(2,proc_gap),proc_gap) = ind1 - end if - else - begin_interval = .true. - end if - end do - end do - end do - - ! ===== Send information about first and last ===== - tag_table = compute_tag(ind_group, tag_obtsend_NP, direction) - do proc_gap = send_gap_abs(1), send_gap_abs(2) - ! I am the first ? - if (first(2,proc_gap)>0) then - if(send_rank(proc_gap)/= D_rank(direction)) then - call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(1), D_comm(direction), & - & send_request(proc_gap,1), ierr) - send_request(proc_gap,3) = 1 - else - rece_gap(1,1) = min(rece_gap(1,1), -proc_gap) - rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap) - end if - end if - ! I am the last ? - if (last(2,proc_gap)>0) then - if(send_rank(proc_gap)/= D_rank(direction)) then - call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(2), D_comm(direction), & - & send_request(proc_gap,2), ierr) - send_request(proc_gap,3) = send_request(proc_gap, 3) + 2 - else - rece_gap(1,2) = max(rece_gap(1,2), -proc_gap) - rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap) - end if - end if - end do - - ! ===== Receive information form the first and the last processus which need a part of my local velocity field ===== - do while(rece_gap(2,1) < gp_size) - call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr) - rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1)) - rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2) - end do - do while(rece_gap(2,2) < gp_size) - call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr) - rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1)) - rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2) - end do - - ! ===== Free Isend buffer ===== - do proc_gap = send_gap_abs(1), send_gap_abs(2) - select case (send_request(proc_gap,3)) - case (3) - call mpi_wait(send_request(proc_gap,1), statut, ierr) - call mpi_wait(send_request(proc_gap,2), statut, ierr) - case (2) - call mpi_wait(send_request(proc_gap,2), statut, ierr) - case (1) - call mpi_wait(send_request(proc_gap,1), statut, ierr) - end select - end do - - ! ===== Deallocate fields ===== - deallocate(send_request) - deallocate(first) - deallocate(last) - -end subroutine AC_remesh_determine_communication - - -!> Determine the set of processes wich will send me information during the remeshing -!! and compute for each of these processes the range of wanted data. Version for M'6 -!! scheme (some implicitation can not be done anymore) -!! @param[in] direction = current direction (1 = along X, 2 = along Y, 3 = along Z) -!! @param[in] gs = size of group of line along the current direction -!! @param[in] ind_group = coordinate of the current group of lines -!! 
-
-
-!> Determine the set of processes which will send me information during the remeshing,
-!! and compute for each of these processes the range of wanted data. Version for the M'6
-!! scheme (some implicit assumptions can no longer be made).
-!! @param[in]  direction    = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!! @param[in]  gs           = size of the group of lines along the current direction
-!! @param[in]  ind_group    = coordinate of the current group of lines
-!! @param[out] rece_gap     = coordinate range of processes which will send me information during the remeshing.
-!! @param[in]  send_gap     = distance between me and the processes to which I will send information (for each line of the group)
-!! @param[in]  send_gap_abs = send_gap_abs(i) is the min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-!! @param[out] send_rank    = MPI rank of the processes to which I will send messages.
-!! @param[in,out] cartography = cartography(proc_gap) contains the set of line indices in the block for which the
-!!                            current process requires data from proc_gap, and for each of these lines the range
-!!                            of mesh points from where it requires the velocity values.
-!! @details
-!!    Work on a group of lines of size gs(1) x gs(2).
-!!    Obtain the list of processes which are associated to the sub-domains where my particles
-!!    will be remeshed, and the list of processes which contain particles that have to be
-!!    remeshed in my sub-domain. This way, this procedure determines which processes need
-!!    to communicate with each other in order to perform the remeshing (as, in a parallel
-!!    context, the real space is subdivided and each process contains a part of it).
-!!    At the same time, it computes, for each process with which I will communicate, the
-!!    range of mesh points involved for each line of particles inside the group, and stores
-!!    it using sparse-matrix techniques (see the cartography defined in the algorithm
-!!    documentation).
-!!    This routine involves communication to determine if a process is the first or the
-!!    last one (considering its coordinate along the current direction) to send remeshing
-!!    information to a given process.
-subroutine AC_remesh_determine_communication_com(direction, gs, ind_group, &
-    & rece_gap, send_gap, send_gap_abs, send_rank, cartography)
-! XXX Works only for periodic boundary conditions.
-
-    use cart_topology   ! info about mesh and mpi topology
-    use advec_variables ! contains info about solver parameters and others.
-    use mpi
-
-    ! Input/output
-    integer, intent(in)                         :: direction
-    integer, dimension(2), intent(in)           :: gs ! group size
-    integer, dimension(2), intent(in)           :: ind_group
-    integer, dimension(2, 2), intent(out)       :: rece_gap ! minimal and maximal processes which will remesh inside me
-    integer(kind=4), dimension(gs(1),gs(2),2),intent(in):: send_gap ! minimal and maximal processes which contain the sub-domains where my
-                                                   ! particles will be remeshed, for each line of the line group
-    integer, dimension(2), intent(in)           :: send_gap_abs ! minimal and maximal processes which contain the sub-domains where my particles will be remeshed.
-    integer,dimension(send_gap_abs(1):send_gap_abs(2))&
-        &, intent(out)                          :: send_rank ! rank of the processes to which I will send data
-    integer, dimension(2+gs(2)*(2+3*gs(1)), &
-        & send_gap_abs(1):send_gap_abs(2)), intent(out) :: cartography
-
-    ! To manage communications and to localize sub-domains
-    integer(kind=4)                     :: proc_gap ! gap between a process coordinate (along the current
-                                                    ! direction) in the MPI topology and my coordinate
-    integer                             :: rankP ! process rank for shift (P = previous)
-    integer, dimension(2)               :: tag_table ! MPI message tags (to communicate rece_gap(1) and rece_gap(2))
-    integer, dimension(:,:),allocatable :: send_request ! MPI requests of the nonblocking sends
-    integer                             :: ierr ! MPI error code
-    integer, dimension(MPI_STATUS_SIZE) :: statut ! MPI status
-    ! To determine which process is the first/last to send data to another
-    integer, dimension(gs(1), gs(2))    :: send_max_prev ! maximum gap between the previous process and the receivers of its remeshing buffer
-    integer, dimension(gs(1), gs(2))    :: send_min_next ! minimum gap between the next process and the receivers of its remeshing buffer
-    integer, dimension(:,:), allocatable :: first, last ! storage of the processes to which I will be the first (or the last) to send
-                                                        ! remeshed particles
-    ! Other local variables
-    integer                             :: ind1, ind2 ! indices of the current line inside the group
-    integer                             :: min_size ! first index in "first" and "last" used to store indices along the first dimension of the group of lines
-    integer                             :: gp_size ! group size
-    integer,dimension(2)                :: rece_buffer ! buffer for the reception of rece_max
-    logical                             :: begin_interval ! are we at the start of an interval?
-
-    rece_gap(1,1) = 3*N(direction)
-    rece_gap(1,2) = -3*N(direction)
-    rece_gap(2,:) = 0
-    gp_size = gs(1)*gs(2)
-
-    allocate(send_request(send_gap_abs(1):send_gap_abs(2),3))
-    send_request(:,3) = 0
-
-    ! ===== Exchange ghosts =====
-    ! Compute the message tag - we re-use the tag_part_tag_NP id, as using this procedure
-    ! supposes that "AC_type_and_block" is not used
-    tag_table = compute_tag(ind_group, tag_part_tag_NP, direction)
-    ! Exchange "ghosts"
-    call mpi_Sendrecv(send_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), &
-            & send_min_next(1,1), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(1), &
-            & D_comm(direction), statut, ierr)
-    call mpi_Sendrecv(send_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(2), &
-            & send_max_prev(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), &
-            & D_comm(direction), statut, ierr)
-    ! Translate to adapt the gaps to my position
-    send_max_prev = send_max_prev - 1
-    send_min_next = send_min_next + 1
-
-    ! ===== Compute if I am first or last and determine the cartography =====
-    min_size = 2 + gs(2)
-    ! Initialize "first" and "last" to determine if I am the first or the last process (considering the current direction)
-    ! to require information from this process
-    allocate(first(2,send_gap_abs(1):send_gap_abs(2)))
-    first(2,:) = 0 ! number of lines for which I am the first
-    allocate(last(2,send_gap_abs(1):send_gap_abs(2)))
-    last(2,:) = 0 ! number of lines for which I am the last
-    ! Initialize cartography
-    cartography(1,:) = 0        ! number of velocity values to receive
-    cartography(2,:) = min_size ! number of elements to send when sending the cartography
-    ! And compute cartography, first and last!
-    do proc_gap = send_gap_abs(1), send_gap_abs(2)
-        first(1,proc_gap) = -proc_gap
-        last(1,proc_gap) = -proc_gap
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr)
-        do ind2 = 1, gs(2)
-            cartography(2+ind2,proc_gap) = 0 ! 2 x number of intervals of concerned lines in column i2
-            begin_interval = .true.
-            do ind1 = 1, gs(1)
-                ! Does proc_gap belong to [send_gap(i1,i2,1);send_gap(i1,i2,2)]?
-                if((proc_gap>=send_gap(ind1,ind2,1)).and.(proc_gap<=send_gap(ind1,ind2,2))) then
-                    ! Compute if I am the first.
-                    if(proc_gap > send_max_prev(ind1,ind2)) first(2,proc_gap) = first(2,proc_gap)+1
-                    ! Compute if I am the last.
-                    if(proc_gap < send_min_next(ind1,ind2)) last(2,proc_gap) = last(2,proc_gap)+1
-                    ! Update the cartography. Needed even if the target process is myself, as we use a buffer
-                    ! in all cases (the scalar field cannot be used directly during the remeshing)
-                    if (begin_interval) then
-                        cartography(2+ind2,proc_gap) = cartography(2+ind2,proc_gap)+2
-                        cartography(cartography(2,proc_gap)+1,proc_gap) = ind1
-                        cartography(2,proc_gap) = cartography(2,proc_gap) + 2
-                        cartography(cartography(2,proc_gap),proc_gap) = ind1
-                        begin_interval = .false.
-                    else
-                        cartography(cartography(2,proc_gap),proc_gap) = ind1
-                    end if
-                else
-                    begin_interval = .true.
-                end if
-            end do
-        end do
-    end do
-
-    ! ===== Send information about first and last =====
-    tag_table = compute_tag(ind_group, tag_obtsend_NP, direction)
-    do proc_gap = send_gap_abs(1), send_gap_abs(2)
-        ! Am I the first?
-        if (first(2,proc_gap)>0) then
-            if(send_rank(proc_gap)/= D_rank(direction)) then
-                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(1), D_comm(direction), &
-                    & send_request(proc_gap,1), ierr)
-                send_request(proc_gap,3) = 1
-            else
-                rece_gap(1,1) = min(rece_gap(1,1), -proc_gap)
-                rece_gap(2,1) = rece_gap(2,1) + first(2,proc_gap)
-            end if
-        end if
-        ! Am I the last?
-        if (last(2,proc_gap)>0) then
-            if(send_rank(proc_gap)/= D_rank(direction)) then
-                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, send_rank(proc_gap), tag_table(2), D_comm(direction), &
-                    & send_request(proc_gap,2), ierr)
-                send_request(proc_gap,3) = send_request(proc_gap, 3) + 2
-            else
-                rece_gap(1,2) = max(rece_gap(1,2), -proc_gap)
-                rece_gap(2,2) = rece_gap(2,2) + last(2,proc_gap)
-            end if
-        end if
-    end do
-
-    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
-    do while(rece_gap(2,1) < gp_size)
-        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
-        rece_gap(1,1) = min(rece_gap(1,1), rece_buffer(1))
-        rece_gap(2,1) = rece_gap(2,1) + rece_buffer(2)
-    end do
-    do while(rece_gap(2,2) < gp_size)
-        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
-        rece_gap(1,2) = max(rece_gap(1,2), rece_buffer(1))
-        rece_gap(2,2) = rece_gap(2,2) + rece_buffer(2)
-    end do
-
-    ! ===== Free the Isend buffers =====
-    do proc_gap = send_gap_abs(1), send_gap_abs(2)
-        select case (send_request(proc_gap,3))
-            case (3)
-                call mpi_wait(send_request(proc_gap,1), statut, ierr)
-                call mpi_wait(send_request(proc_gap,2), statut, ierr)
-            case (2)
-                call mpi_wait(send_request(proc_gap,2), statut, ierr)
-            case (1)
-                call mpi_wait(send_request(proc_gap,1), statut, ierr)
-        end select
-    end do
-
-    ! ===== Deallocate fields =====
-    deallocate(send_request)
-    deallocate(first)
-    deallocate(last)
-
-end subroutine AC_remesh_determine_communication_com
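-
-! [Editorial note - illustrative sketch, not part of the original file] The ghost
-! exchange at the beginning of the routine above lets each process deduce
-! "first"/"last" without the remesh_min/remesh_max conditions of the tag/type
-! variant. With hypothetical values for one line: if my send interval is
-! proc_gap = -1..2, my previous neighbour sends up to gap 0 (send_max_prev =
-! 0 - 1 = -1 in my frame) and my next neighbour starts at gap 1 (send_min_next =
-! 1 + 1 = 2 in my frame), then I am "first" for the gaps 0, 1, 2
-! (proc_gap > send_max_prev) and "last" for the gaps -1, 0, 1
-! (proc_gap < send_min_next) - which is what the two tests in the loop check.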
-
-
-!> Update the cartography of data which will be exchanged from one process to another in order to remesh particles.
-!! @param[in] direction = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!! @param[in] gs        = size of the group of lines along the current direction
-!! @param[in] begin_i1  = index of the first place in the cartography array where indices along the
-!!                        first dimension of the group of lines are stored.
-!! @param[in] proc_gap  = distance between my (MPI) coordinate and the coordinate of the target process
-!! @param[in] ind_carto = current column inside the cartography (different from proc_gap: in this procedure
-!!                        the first index is 1, as the cartography range is not given as an argument)
-!! @param[in] send_min  = minimal mesh index involved in remeshing particles (of the particles in my local subdomain)
-!! @param[in] send_max  = maximal mesh index involved in remeshing particles (of the particles in my local subdomain)
-!! @param[in,out] cartography = cartography(proc_gap) contains the set of line indices in the block for which the
-!!                        current process requires data from proc_gap, and for each of these lines the range
-!!                        of mesh points from where it requires the velocity values.
-!! @param[out] com_size = number of elements (integers) stored in the cartography (which will be the size of some MPI communication)
-subroutine AC_remesh_cartography(direction, gs, begin_i1, proc_gap, ind_carto, send_min, send_max, cartography, com_size)
-
-    use cart_topology ! Description of mesh and of mpi topology
-
-    ! Input/Output
-    integer, intent(in)                 :: direction
-    integer, dimension(2), intent(in)   :: gs
-    integer, intent(in)                 :: begin_i1 ! index of the first place in the cartography array where
-                                                    ! indices along the first dimension of the group of lines are stored.
-    integer, intent(in)                 :: proc_gap ! distance between my (MPI) coordinate and the coordinate of the target
-    integer, intent(in)                 :: ind_carto ! current column inside the cartography (different from proc_gap:
-                                                    ! in this procedure the first index is 1, as the cartography range is not given as an argument)
-    integer, dimension(:,:), intent(in) :: send_min ! minimal mesh index involved in remeshing my particles
-    integer, dimension(:,:), intent(in) :: send_max ! maximal mesh index involved in remeshing my particles
-    integer, dimension(:,:), intent(inout) :: cartography
-    integer, intent(out)                :: com_size ! number of elements (integers) stored in the cartography (which will
-                                                    ! be the size of some MPI communication)
-
-    ! Other local variables
-    integer :: gap ! gap between my local indices and the local indices of another process
-    integer :: i1, i2 ! indices of a line inside the group
-    integer :: ind_for_i1 ! where to read the first coordinate (i1) of the current line inside the cartography?
-    integer :: ind_1Dtable ! index of my current position inside a one-dimensional table
-
-    cartography(1,ind_carto) = 0
-    ! Use the cartography to know which lines are concerned
-    com_size = cartography(2,ind_carto)
-    ! Range I want - store it into the cartography
-    gap = proc_gap*N_proc(direction)
-    ! Position in cartography(:,ind_carto) of the current i1 index
-    ind_for_i1 = begin_i1
-    do i2 = 1, gs(2)
-        do ind_1Dtable = ind_for_i1+1, ind_for_i1 + cartography(2+i2,ind_carto), 2
-            do i1 = cartography(ind_1Dtable,ind_carto), cartography(ind_1Dtable+1,ind_carto)
-                ! The interval starts from:
-                cartography(com_size+1,ind_carto) = max(send_min(i1,i2), gap+1) ! Fortran => indices start from 1
-                ! and ends at:
-                cartography(com_size+2,ind_carto) = min(send_max(i1,i2), gap+N_proc(direction))
-                ! update the number of elements to send
-                cartography(1,ind_carto) = cartography(1,ind_carto) &
-                        & + cartography(com_size+2,ind_carto) &
-                        & - cartography(com_size+1,ind_carto) + 1
-                com_size = com_size+2
-            end do
-        end do
-        ind_for_i1 = ind_for_i1 + cartography(2+i2,ind_carto)
-    end do
-
-end subroutine AC_remesh_cartography
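-
-! [Editorial note - sketch of how the routine above is used, taken from
-! AC_remesh_init below; no new names are introduced] One call fills one column of
-! the cartography per target process, the columns being indexed from 1 while
-! proc_gap spans [send_gap_abs(1), send_gap_abs(2)]:
-!    proc_gap = send_gap_abs(1) - 1
-!    do ind_gap = 1, nb_s
-!        proc_gap = proc_gap + 1
-!        call AC_remesh_cartography(direction, gs, min_size, proc_gap, ind_gap, &
-!            & send_min, send_max, cartography, com_size)
-!    end do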
-
-
-!> Perform all the pre-processing needed to remesh the particles and to perform the associated communications.
-!! @details
-!!    As the geometric domain is subdivided among the different MPI processes, the
-!!    particle remeshing involves MPI communication in order to redistribute the
-!!    particle weights to the right place.
-!!    In order to gather these communications for different particle lines, the
-!!    particle remeshing is performed into a buffer. The buffer is a 1D array
-!!    whose structure ensures that all the values that have to be sent to a given
-!!    process are memory-contiguous.
-!!    This subroutine creates this buffer and provides a map to manage it. This
-!!    map allows associating an XYZ coordinate (in the geometrical domain) to each
-!!    element of this 1D array.
-subroutine AC_remesh_init(direction, ind_group, gs, send_min, send_max, &
-    & send_gap_abs, send_rank, rece_gap, rece_rank, nb_s, cartography, &
-    & rece_carto, pos_in_buffer, min_size, max_size, s_request_ran, r_request_ran)
-
-    use mpi
-    use cart_topology   ! Description of mesh and of mpi topology
-    use advec_variables ! contains info about solver parameters and others.
-
-    ! Input/Output
-    integer, intent(in)                     :: direction
-    integer, dimension(2), intent(in)       :: ind_group
-    integer, dimension(2), intent(in)       :: gs
-    integer, dimension(:,:), intent(in)     :: send_min ! minimal mesh index involved in remeshing my particles (for each line of particles)
-    integer, dimension(:,:), intent(in)     :: send_max ! maximal mesh index involved in remeshing my particles
-    integer, dimension(2), intent(in)       :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), intent(in)       :: send_rank ! rank of the processes to which I send information
-    integer, dimension(2, 2), intent(in)    :: rece_gap ! distance between me and the processes which will send me information
-    integer, dimension(rece_gap(1,1):rece_gap(1,2)), intent(inout) :: rece_rank ! rank of the processes which send me information
-    integer, intent(in)                     :: nb_s ! number of sends
-    integer, dimension(:,:), intent(inout)  :: cartography ! cartography(proc_gap) contains the set of line indices in the block to which the
-                                                           ! current process will send data during the remeshing, and for each of these lines
-                                                           ! the range of mesh points from where it requires the velocity values.
-    integer, dimension(:,:), intent(inout)  :: rece_carto ! same as above, but for what I receive
-    integer,dimension(0:nb_s),intent(inout) :: pos_in_buffer ! information about the organization of the 1D buffer used to remesh
-                                                             ! a 3D set of particles.
-    integer, intent(out)                    :: min_size ! tool to manage the cartography
-    integer, intent(in)                     :: max_size ! tool to manage the cartography
-    integer, dimension(:), intent(inout)    :: s_request_ran ! MPI communication requests (handles) of the nonblocking sends
-    integer, dimension(:), intent(inout)    :: r_request_ran ! MPI communication requests (handles) of the nonblocking receives
-
-    ! Others
-    integer :: proc_gap ! distance between my (MPI) coordinate and the coordinate of the
-                        ! process associated to a given position
-    integer :: ind_gap ! loop index
-    integer :: ind_1Dtable ! index of my current position inside a one-dimensional table
-    ! Variables used to manage MPI communications
-    integer :: com_size ! size of a sent/received message
-    integer :: tag ! MPI message tag
-    integer :: ierr ! MPI error code
-    integer :: rankP ! MPI rank (mpi_cart_shift needs two arguments which are ranks)
-
-    ! ===== Receive cartography =====
-    ! It is better to post the receives before sending.
-    ind_1Dtable = 0
-    do proc_gap = rece_gap(1,1), rece_gap(1,2)
-        ind_1Dtable = ind_1Dtable + 1
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
-        if (rece_rank(proc_gap)/= D_rank(direction)) then
-            tag = compute_tag(ind_group, tag_bufToScal_range, direction, -proc_gap)
-            call mpi_Irecv(rece_carto(1,ind_1Dtable), max_size, MPI_INTEGER, &
-                & rece_rank(proc_gap), tag, D_COMM(direction), r_request_ran(ind_1Dtable), ierr)
-        else
-            rece_carto(1,ind_1Dtable) = 0
-        end if
-    end do
-
-    ! ===== Complete the cartography and send the ranges of the particles I remesh =====
-    s_request_ran = MPI_REQUEST_NULL
-    min_size = 2 + gs(2)
-    proc_gap = send_gap_abs(1) - 1
-    do ind_gap = 1, nb_s !send_gap_abs(2), send_gap_abs(1) + 1
-        proc_gap = proc_gap + 1
-        !proc_gap = ind_gap+send_gap_abs(1)-1
-        call AC_remesh_cartography(direction, gs, min_size, proc_gap, ind_gap, &
-            & send_min, send_max, cartography, com_size)
-        ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
-        tag = compute_tag(ind_group, tag_bufToScal_range, direction, proc_gap)
-        ! Send the message
-        if (send_rank(ind_gap) /= D_rank(direction)) then
-            call mpi_ISsend(cartography(1,ind_gap), com_size, MPI_INTEGER, send_rank(ind_gap), tag, &
-                & D_comm(direction), s_request_ran(ind_gap),ierr)
-        end if
-    end do
-
-    ! ===== Initialize the general buffer =====
-    ! The same buffer is used to send data to all target processes. Its size
-    ! has to be computed, as well as the part reserved to each process:
-    ! it has to be split into one part per target process.
-    ! => pos_in_buffer(i) = first (1D) index of the sub-array of send_buffer
-    ! associated to the i-th MPI process to which I will send remeshed particles.
-    pos_in_buffer(0) = 1
-    pos_in_buffer(1) = 1
-    do ind_gap =1, nb_s - 1 !send_gap_abs(2)-send_gap_abs(1)
-        pos_in_buffer(ind_gap+1)= pos_in_buffer(ind_gap) + cartography(1,ind_gap)
-    end do
-    ! While writing values into the send buffer during the remeshing, pos_in_buffer is updated.
-    ! As it has one extra element (the "0" one), after this process pos_in_buffer(i-1)
-    ! will be equal to the first (1D) index of the sub-array of send_buffer
-    ! associated to the i-th MPI process to which I will send remeshed particles.
-
-end subroutine AC_remesh_init
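-
-! [Editorial note - illustrative sketch, not part of the original file] At the end
-! of AC_remesh_init above, pos_in_buffer holds the prefix sum of the per-receiver
-! message sizes. For instance (hypothetical sizes), with nb_s = 3 receivers
-! expecting 10, 4 and 7 values (cartography(1,1:3)):
-!    pos_in_buffer(0:3) = (/1, 1, 11, 15/)
-! so the part of send_buffer devoted to receiver ind_gap starts at
-! pos_in_buffer(ind_gap); each write during the remeshing then increments the
-! corresponding entry, shifting it one receiver ahead as described above.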
-
-!> Perform all the work needed to compute the scalar values at t+dt from the buffer
-!! containing the remeshing of the local particles.
-!! @details
-!!    After having remeshed the particles of the local sub-domain into a buffer, it
-!!    remains to send the buffers to the different processes according to the domain
-!!    subdivision over the processes. Then, the local scalar field is updated using
-!!    the received buffers.
-subroutine AC_remesh_finalize(direction, ind_group, gs, j, k, scal, send_gap_abs, send_rank, rece_gap, &
-    & rece_rank, nb_r, nb_s, cartography, rece_carto, send_buffer, pos_in_buffer, min_size)
-
-    use mpi
-    use cart_topology   ! Description of mesh and of mpi topology
-    use advec_variables ! contains info about solver parameters and others.
-
-    ! Input/Output
-    integer, intent(in)                     :: direction
-    integer, dimension(2), intent(in)       :: ind_group
-    integer, dimension(2), intent(in)       :: gs
-    integer, intent(in)                     :: j, k
-    real(WP),dimension(:,:,:),intent(inout) :: scal
-    integer, dimension(2), intent(in)       :: send_gap_abs ! min (resp. max) value of send_gap(:,:,i) with i=1 (resp. 2)
-    integer, dimension(:), intent(in)       :: send_rank ! rank of the processes to which I send information
-    integer, dimension(2, 2), intent(in)    :: rece_gap ! distance between me and the processes which send me information
-    integer, dimension(:), intent(in)       :: rece_rank ! rank of the processes which send me information
-    integer, intent(in)                     :: nb_r, nb_s ! number of receptions/sends
-    integer, dimension(:,:), intent(in)     :: cartography ! cartography(proc_gap) contains the set of line indices in the block to which the
-                                                           ! current process will send data during the remeshing, and for each of these lines
-                                                           ! the range of mesh points from where it requires the velocity values.
-    integer, dimension(:,:), intent(in)     :: rece_carto ! same as above, but for what I receive
-    real(WP),dimension(:), intent(in)       :: send_buffer ! buffer used to remesh the scalar before sending it to the right subdomain,
-                                                           ! sorted by receiver and not by coordinate.
-    integer, dimension(0:nb_s), intent(inout) :: pos_in_buffer ! split of the send buffer among the receivers
-    integer, intent(in)                     :: min_size ! tool to manage the cartography - first index where the line indices
-                                                        ! along the first dimension of the group of lines are stored
-
-    ! Other local variables
-    integer :: proc_gap, gap ! distance between my (MPI) coordinate and the coordinate of the
-                             ! process associated to a given position
-    integer :: ind_gap
-    integer :: ind_1Dtable ! index of my current position inside a one-dimensional table
-    ! Variables used to update the scalar field from the buffers
-    real(WP),dimension(:),allocatable,target:: rece_buffer ! buffer used to receive the scalar field from other processes.
-    integer, dimension(:), allocatable :: rece_pos ! cells with indices from rece_pos(i) to rece_pos(i+1) in rece_buffer
-                                                   ! are devoted to the process of relative position = i
-    ! Variables used to manage MPI communications
-    integer, dimension(:), allocatable :: s_request_sca ! MPI communication requests (handles) of the nonblocking sends
-    integer, dimension(:), allocatable :: r_request_sca ! MPI communication requests (handles) of the nonblocking receives
-    integer, dimension(:,:), allocatable :: s_status ! MPI communication statuses of the nonblocking sends
-    integer, dimension(mpi_status_size) :: r_status ! another MPI communication status
-    integer :: tag ! MPI message tag
-    integer :: ierr ! MPI error code
-    integer :: missing_msg ! number of remeshing buffers not yet received
-
-
-    ! ===== Receive buffers (init the receives before the sends) =====
-    ! -- Compute the size of the reception buffer and split it into parts corresponding to each sender --
-    allocate(rece_pos(rece_gap(1,1):rece_gap(1,2)+1))
-    rece_pos(rece_gap(1,1)) = 1
-    ind_gap = 0
-    do proc_gap = rece_gap(1,1), rece_gap(1,2)
-        ind_gap = ind_gap + 1
-        rece_pos(proc_gap+1)= rece_pos(proc_gap) + rece_carto(1,ind_gap)
-    end do
-    allocate(rece_buffer(rece_pos(rece_gap(1,2)+1)-1))
-    ! -- And initialize the reception --
-    allocate(r_request_sca(1:nb_r))
-    r_request_sca = MPI_REQUEST_NULL
-    ind_gap = 0
-    do proc_gap = rece_gap(1,1), rece_gap(1,2)
-        ind_gap = ind_gap + 1 ! = proc_gap - rece_gap(1,1)+1
-        if (rece_rank(ind_gap)/= D_rank(direction)) then
-            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, -proc_gap)
-            call mpi_Irecv(rece_buffer(rece_pos(proc_gap)), rece_carto(1,ind_gap), &
-                & MPI_DOUBLE_PRECISION, rece_rank(ind_gap), tag, D_COMM(direction), r_request_sca(ind_gap), ierr)
-        end if
-    end do
-
-    ! ===== Send the buffers =====
-    missing_msg = nb_r
-    allocate(s_request_sca(1:nb_s))
-    s_request_sca = MPI_REQUEST_NULL
-    ! -- Send the buffer to the matching process and update the scalar field --
-    do ind_gap = 1, nb_s
-        if (send_rank(ind_gap)/=D_rank(direction)) then
-            ! Send the buffer
-            tag = compute_tag(ind_group, tag_bufToScal_buffer, direction, ind_gap-1+send_gap_abs(1))
-            call mpi_ISsend(send_buffer(pos_in_buffer(ind_gap-1)), cartography(1,ind_gap), MPI_DOUBLE_PRECISION, &
-                & send_rank(ind_gap), tag, D_comm(direction), s_request_sca(ind_gap),ierr)
-        else
-            ! Range I want - stored into the cartography
-            gap = -(ind_gap-1+send_gap_abs(1))*N_proc(direction)
-            ! Update the scalar field directly
-            call remesh_buffer_to_scalar_pt(gs, j, k, ind_gap, gap, min_size, &
-                & cartography, send_buffer, scal, pos_in_buffer(ind_gap-1))
-            missing_msg = missing_msg - 1
-        end if
-    end do
-
-    ! ===== Update the scalar field =====
-    do while (missing_msg >= 1)
-        ! --- Pick one of the available messages ---
-        ! more precisely: the last completed (and not yet freed) reception and, if no such
-        ! message is available, the first completed reception.
-        call mpi_waitany(nb_r, r_request_sca, ind_1Dtable, r_status, ierr)
-        ! -- Update the scalar field by using the cartography --
-        ! Range I want - stored into the cartography
-        proc_gap = ind_1Dtable + rece_gap(1,1)-1
-        gap = proc_gap*N_proc(direction)
-        call remesh_buffer_to_scalar_pt(gs, j, k, ind_1Dtable, gap, min_size, &
-            & rece_carto, rece_buffer, scal, rece_pos(proc_gap))
-        missing_msg = missing_msg - 1
-    end do
-
-    ! ===== Free memory and communication buffers =====
-    ! -- Deallocate all fields --
-    deallocate(rece_pos)
-    deallocate(rece_buffer)
-    deallocate(r_request_sca)
-    ! -- Check that the Isends are done --
-    allocate(s_status(MPI_STATUS_SIZE,1:nb_s))
-    call mpi_waitall(nb_s, s_request_sca, s_status, ierr)
-    deallocate(s_status)
-    ! -- Free all communication buffers and data --
-    deallocate(s_request_sca)
-
-end subroutine AC_remesh_finalize
-
-
-end module advec_common_remesh
-!> @}
diff --git a/HySoP/src/scalesInterface/particles/advec_common_velo.f90 b/HySoP/src/scalesInterface/particles/advec_common_velo.f90
deleted file mode 100644
index f492901f36e8f53da10cf6468a61a17a8e61c3ca..0000000000000000000000000000000000000000
--- a/HySoP/src/scalesInterface/particles/advec_common_velo.f90
+++ /dev/null
@@ -1,699 +0,0 @@
-!> @addtogroup part
-!! @{
-!------------------------------------------------------------------------------
-!
-! MODULE: advec_common_velo
-!
-!
-! DESCRIPTION:
-!> The module ``advec_common_velo'' gathers functions and subroutines used to interpolate
-!! the velocity at the particle positions, which are not specific to a direction.
-!! @details
-!! This module gathers functions and routines used to advect a scalar which are not
-!! specific to a direction. This is a parallel implementation using MPI and
-!! the Cartesian topology it provides. It also contains the variables common to
-!! the solver along each direction and other generic variables used for the
-!! advection based on the particle method.
-!!
-!! Except for testing purposes, this module is not supposed to be used by the
-!! main code but only by the other advection modules. More precisely, a final user
-!! must only use the generic "advec" module, which contains all the interfaces to
-!! solve the advection equation with the particle method and to choose the
-!! remeshing formula, the dimensional splitting and everything else. Except for
-!! testing purposes, the other advection modules only have to include
-!! "advec_common".
-!!
-!! The module "test_advec" can be used in order to validate the procedures
-!! embedded in this module.
-!
-!> @author
-!! Jean-Baptiste Lagaert, LEGI
-!
-!------------------------------------------------------------------------------
-
-module advec_common_velo
-
-    use precision_tools
-    use structure_tools
-    use advec_abstract_proc
-
-    implicit none
-
-
-    ! Information about the particles and their block
-    public
-
-
-    ! ===== Public procedures =====
-    !----- To interpolate velocity -----
-    public :: AC_velocity_interpol_group
-    public :: AC_velocity_interpol_no_com
-    public :: AC_velocity_determine_communication
-
-    ! ===== Public variables =====
-
-    ! ===== Private variables =====
-
-
-contains
-
-! ===== Public procedure =====
-
-! ==================================================================================
-! ====================    Compute particle velocity (RK2)      ====================
-! ==================================================================================
-
-!> Interpolate the velocity field used in a RK2 scheme for particle advection -
-!! version for a group of (more than one) lines.
-!! @param[in]     dt         = time step
-!! @param[in]     direction  = current direction (1 = along X, 2 = along Y and 3 = along Z)
-!! @param[in]     gs         = size of a group (i.e. number of lines it gathers along the two other directions)
-!! @param[in]     ind_group  = coordinate of the current group of lines
-!! @param[in]     p_pos_adim = dimensionless particle position
-!! @param[in,out] p_V        = particle velocity (along the current direction)
-!! @details
-!!    A RK2 scheme is used to advect the particles: the middle point scheme. An
-!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed, and then
-!!    the numerical velocity of each particle is computed as the interpolation of V at
-!!    this point. This field is used to advect the particles at second order in time:
-!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
-!!    The group line index is used to ensure the uniqueness of each MPI message tag.
-!!    The interpolation is done for a group of lines, allowing communications to be
-!!    pooled. Considering a group of Na x Nb lines, the communications performed
-!!    by this algorithm are about (Na x Nb) times bigger than for the algorithm which
-!!    works on a single line, but also about (Na x Nb) times less frequent.
-subroutine AC_velocity_interpol_group(dt, direction, gs, ind_group, p_pos_adim, p_V)
-
-    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but in such a code
-    ! a memory copy would still be needed to send the velocity field to other processes: MPI sends contiguous memory values
-
-    use mpi
-    use cart_topology   ! info about mesh and mpi topology
-    use advec_variables ! contains info about solver parameters and others.
-
-    ! Input/Output
-    real(WP), intent(in)                :: dt ! time step
-    integer, intent(in)                 :: direction ! current direction
-    integer, dimension(2),intent(in)    :: gs ! group size
-    integer, dimension(2), intent(in)   :: ind_group
-    real(WP), dimension(:,:,:), intent(in) :: p_pos_adim
-    real(WP), dimension(:,:,:),intent(inout),target :: p_V
-    ! Others, local
-    real(WP),dimension(N_proc(direction),gs(1),gs(2)), target :: p_posV_bis ! temporary field to store the middle point velocity and position
-    real(WP), dimension(N_proc(direction),gs(1),gs(2)) :: weight ! interpolation weight
-    type(real_pter),dimension(N_proc(direction),gs(1),gs(2)) :: Vp, Vm ! velocity at the previous and next mesh points
-    real(WP), dimension(:), allocatable, target :: V_buffer ! velocity buffer for positions outside of the local subdomain
-    integer, dimension(:), allocatable  :: pos_in_buffer ! position inside the reception buffer for each sender
-    integer , dimension(gs(1), gs(2))   :: rece_ind_min ! minimal mesh index where velocity values are needed (for my local subdomain)
-    integer , dimension(gs(1), gs(2))   :: rece_ind_max ! maximal mesh index where velocity values are needed (for my local subdomain)
-    integer                             :: ind, ind_com ! indices
-    integer                             :: i1, i2 ! indices in the line group
-    integer                             :: pos, pos_old ! indices of the mesh point which precedes the particle position
-    integer                             :: proc_gap, gap ! distance between my (MPI) coordinate and the coordinate of the
-                                                         ! process associated to a given position
-    integer, dimension(:), allocatable  :: rece_rank ! rank of the processes which send me information
-    integer, dimension(:), allocatable  :: send_rank ! rank of the processes to which I send information
-    integer                             :: rankP ! rank of a process ("source rank" returned by mpi_cart_shift)
-    integer, dimension(:), allocatable  :: send_carto ! cartography of what I have to send
-    integer                             :: ind_1Dtable ! index of my current position inside a one-dimensional table
-    integer                             :: ind_for_i1 ! where to read the first coordinate (i1) of the current line inside the cartography?
-    type(real_1D_pter),dimension(:),allocatable :: send_buffer ! to store what I have to send (in a contiguous way)
-    !real(WP), dimension(:), allocatable :: send_buffer ! to store what I have to send (in a contiguous way)
-    integer, dimension(gs(1),gs(2),2)   :: rece_gap ! distance between me and the processes which send me information
-    integer, dimension(2 , 2)           :: send_gap ! distance between me and the processes to which I send information
-    integer, dimension(2)               :: rece_gap_abs ! min (resp. max) value of rece_gap(:,:,i) with i=1 (resp. 2)
-    integer                             :: com_size ! size of a sent/received message
-    integer, dimension(:), allocatable  :: size_com ! sizes of the sent/received messages
-    integer                             :: min_size ! minimal size of cartography(:,proc_gap)
-    integer                             :: max_size ! maximal size of cartography(:,proc_gap)
-    integer                             :: tag ! MPI message tag
-    integer, dimension(:), allocatable  :: tag_proc ! MPI message tags
-    integer                             :: ierr ! MPI error code
-    integer, dimension(:), allocatable  :: s_request ! MPI communication requests (handles) of the nonblocking sends
-    integer, dimension(:), allocatable  :: s_request_bis ! MPI communication requests (handles) of the nonblocking sends
-    integer, dimension(:), allocatable  :: rece_request ! MPI communication requests (handles) of the nonblocking receives
-    integer, dimension(MPI_STATUS_SIZE) :: rece_status ! MPI status (for mpi_wait)
-    type(int_1D_pter), dimension(:), allocatable:: cartography ! cartography(proc_gap) contains the set of line indices in the block for which the
-                                                               ! current process requires data from proc_gap, and for each of these lines the range
-                                                               ! of mesh points from where it requires the velocity values.
-! XXX Todo: to be checked, but storing send_rank seems useless: it is only used
-! for the mpi_wait calls, and initializing the requests to MPI_REQUEST_NULL
-! should allow us to get rid of it.
-
-    ! -- Initialisation --
-    do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            do ind = 1, N_proc(direction)
-                nullify(Vp(ind,i1,i2)%pter)
-                nullify(Vm(ind,i1,i2)%pter)
-            end do
-        end do
-    end do
-    ! Compute the middle point
-    p_posV_bis = p_pos_adim + (dt/2.0)*p_V/d_sc(direction)
-
-! XXX Todo / Optim
-! The velocity is copied here. This is needed because the interpolated velocity
-! is computed from "itself": the computed variable depends on its previous value
-! at points which may differ. Overwriting it as we go would produce wrong results.
-! We could use the input velocity field V directly to produce p_V as output, but
-! depending on the direction this would require changing the order of the indices
-! i, i1 and i2 - which would be a real mess!
-! XXX
-    ! Compute the range of the set of points where I need the velocity values
-    rece_ind_min = floor(p_posV_bis(1,:,:))
-    rece_ind_max = floor(p_posV_bis(N_proc(direction),:,:)) + 1
-
-    ! ===== Exchange the velocity field if needed =====
-    ! It uses nonblocking messages to overlap the computations with the communication process
-    ! -- What do I have to communicate? --
-    rece_gap(:,:,1) = floor(real(rece_ind_min-1)/N_proc(direction))
-    rece_gap(:,:,2) = floor(real(rece_ind_max-1)/N_proc(direction))
-    rece_gap_abs(1) = minval(rece_gap(:,:,1))
-    rece_gap_abs(2) = maxval(rece_gap(:,:,2))
-    max_size = 2 + gs(2)*(2+3*gs(1))
-    allocate(cartography(rece_gap_abs(1):rece_gap_abs(2)))
-    allocate(rece_rank(rece_gap_abs(1):rece_gap_abs(2)))
-    call AC_velocity_determine_communication(direction, ind_group, gs, send_gap, &
-        & rece_gap, rece_gap_abs, rece_rank, cartography, max_size)
-
-    ! -- Send messages about what I want --
-    allocate(s_request_bis(rece_gap_abs(1):rece_gap_abs(2)))
-    allocate(size_com(rece_gap_abs(1):rece_gap_abs(2)))
-    allocate(tag_proc(rece_gap_abs(1):rece_gap_abs(2)))
-    min_size = 2 + gs(2)
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        if (rece_rank(proc_gap) /= D_rank(direction)) then
-            ! Use the cartography to know which lines are concerned
-            size_com(proc_gap) = cartography(proc_gap)%pter(2)
-            ! Range I want - store it into the cartography
-            gap = proc_gap*N_proc(direction)
-            ! Position in cartography(proc_gap)%pter(:) of the current i1 index
-            ind_for_i1 = min_size
-            do i2 = 1, gs(2)
-                do ind = ind_for_i1+1, ind_for_i1 + cartography(proc_gap)%pter(2+i2), 2
-                    do i1 = cartography(proc_gap)%pter(ind), cartography(proc_gap)%pter(ind+1)
-                        ! The interval starts from:
-                        cartography(proc_gap)%pter(size_com(proc_gap)+1) = max(rece_ind_min(i1,i2), gap+1) ! Fortran => indices start from 1
-                        ! and ends at:
-                        cartography(proc_gap)%pter(size_com(proc_gap)+2) = min(rece_ind_max(i1,i2), gap+N_proc(direction))
-                        ! update the number of elements to receive
-                        cartography(proc_gap)%pter(1) = cartography(proc_gap)%pter(1) &
-                                & + cartography(proc_gap)%pter(size_com(proc_gap)+2) &
-                                & - cartography(proc_gap)%pter(size_com(proc_gap)+1) + 1
-                        size_com(proc_gap) = size_com(proc_gap)+2
-                    end do
-                end do
-                ind_for_i1 = ind_for_i1 + cartography(proc_gap)%pter(2+i2)
-            end do
-            ! Tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
-            tag_proc(proc_gap) = compute_tag(ind_group, tag_velo_range, direction, proc_gap)
-            ! Send the message
-            call mpi_ISsend(cartography(proc_gap)%pter(1), size_com(proc_gap), MPI_INTEGER, rece_rank(proc_gap), &
-                & tag_proc(proc_gap), D_comm(direction), s_request_bis(proc_gap),ierr)
-        end if
-    end do
-
-    ! -- Send the velocity field to the processes which need it --
-    allocate(s_request(send_gap(1,1):send_gap(1,2)))
-    allocate(send_rank(send_gap(1,1):send_gap(1,2)))
-    allocate(send_carto(max_size))
-    allocate(send_buffer(send_gap(1,1):send_gap(1,2)))
-! XXX Todo: count the number of messages to receive, then process them in arrival
-! order via MPI_ANY_SOURCE? But then ranks and coordinates have to be linked, which
-! means adding a call to mpi_cart_coords ... or sending the rank inside the
-! cartography!
-! To be decided which is best.
-    do proc_gap = send_gap(1,1), send_gap(1,2)
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, send_rank(proc_gap), ierr)
-        if (send_rank(proc_gap) /= D_rank(direction)) then
-            ! I - Receive messages about what I have to send
-            ! Ia - Compute the reception tag = concatenation of (rank+1), ind_group(1), ind_group(2), direction and a unique id.
-            tag = compute_tag(ind_group, tag_velo_range, direction, -proc_gap)
-            ! Ib - Receive the message
-            call mpi_recv(send_carto(1), max_size, MPI_INTEGER, send_rank(proc_gap), tag, D_comm(direction), rece_status, ierr)
-            ! II - Send it
-            ! IIa - Create the send buffer
-            allocate(send_buffer(proc_gap)%pter(send_carto(1)))
-            !allocate(send_buffer(send_carto(1)))
-            gap = proc_gap*N_proc(direction)
-            com_size = 0
-            ind_1Dtable = send_carto(2)
-            ! Position in cartography(:,proc_gap) of the current i1 index
-            ind_for_i1 = min_size
-            do i2 = 1, gs(2)
-                do ind = ind_for_i1+1, ind_for_i1 + send_carto(2+i2), 2
-                    do i1 = send_carto(ind), send_carto(ind+1)
-                        do ind_com = send_carto(ind_1Dtable+1)+gap, send_carto(ind_1Dtable+2)+gap ! index inside the current line
-                            com_size = com_size + 1
-                            send_buffer(proc_gap)%pter(com_size) = p_V(ind_com, i1,i2)
-                            !send_buffer(com_size) = p_V(ind_com, i1,i2)
-                        end do
-                        ind_1Dtable = ind_1Dtable + 2
-                    end do
-                end do
-                ind_for_i1 = ind_for_i1 + send_carto(2+i2)
-            end do
-            ! IIb - Compute the send tag
-            tag = compute_tag(ind_group, tag_velo_V, direction, proc_gap)
-            ! IIc - Send the message
-            !call mpi_Isend(send_buffer(1), com_size, MPI_DOUBLE_PRECISION, &
-            call mpi_ISsend(send_buffer(proc_gap)%pter(1), com_size, MPI_DOUBLE_PRECISION, &
-                & send_rank(proc_gap), tag, D_comm(direction), s_request(proc_gap), ierr)
-            !deallocate(send_buffer)
-        end if
-    end do
-    deallocate(send_carto)
-
-    ! -- Nonblocking reception of the velocity field --
-    ! Allocate pos_in_buffer to compute the V_buffer size and to be able to
-    ! allocate it.
-    allocate(pos_in_buffer(rece_gap_abs(1):rece_gap_abs(2)))
-    pos_in_buffer(rece_gap_abs(1)) = 1
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)-1
-        pos_in_buffer(proc_gap+1)= pos_in_buffer(proc_gap) + cartography(proc_gap)%pter(1)
-    end do
-    allocate(V_buffer(pos_in_buffer(rece_gap_abs(2)) &
-        & + cartography(rece_gap_abs(2))%pter(1)))
-    V_buffer = 0
-    allocate(rece_request(rece_gap_abs(1):rece_gap_abs(2)))
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        if (rece_rank(proc_gap) /= D_rank(direction)) then
-            ! IIa - Compute the reception tag
-            tag = compute_tag(ind_group, tag_velo_V, direction, -proc_gap)
-            ! IIb - Receive the message
-            call mpi_Irecv(V_buffer(pos_in_buffer(proc_gap)), cartography(proc_gap)%pter(1), MPI_DOUBLE_PRECISION, &
-                & rece_rank(proc_gap), tag, D_comm(direction), rece_request(proc_gap), ierr)
-        end if
-    end do
-
-    ! -- Free some ISsend buffers and some arrays --
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        !if (associated(cartography(proc_gap)%pter)) deallocate(cartography(proc_gap)%pter)
-        deallocate(cartography(proc_gap)%pter)
-    end do
-    deallocate(cartography) ! We do not need it anymore
-! XXX Todo: prefer a call to MPI_WAITALL, combined with initializing s_request_bis
-! to MPI_REQUEST_NULL, and remove the loop AND the if.
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        if (rece_rank(proc_gap) /= D_rank(direction)) then
-            call MPI_WAIT(s_request_bis(proc_gap),rece_status,ierr)
-        end if
-    end do
-    deallocate(s_request_bis)
-    deallocate(tag_proc)
-    deallocate(size_com)
-
-    ! ===== Compute the interpolated velocity =====
-    ! -- Compute the interpolation weights and update the pointers Vp and Vm --
-    do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            ! Initialization of the recurrence process
-            ind = 1
-            pos = floor(p_posV_bis(ind,i1,i2))
-            weight(ind,i1,i2) = p_posV_bis(ind,i1,i2)-pos
-            ! Vm = V(pos)
-            proc_gap = floor(real(pos-1)/N_proc(direction))
-            if (rece_rank(proc_gap) == D_rank(direction)) then
-                Vm(ind,i1,i2)%pter => p_V(pos-proc_gap*N_proc(direction), i1,i2)
-            else
-                Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-            end if
-            ! Vp = V(pos+1)
-            proc_gap = floor(real(pos+1-1)/N_proc(direction))
-            if (rece_rank(proc_gap) == D_rank(direction)) then
-                Vp(ind,i1,i2)%pter => p_V(pos+1-proc_gap*N_proc(direction), i1,i2)
-            else
-                Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-            end if
-            pos_old = pos
-
-            ! Following indices: we re-use the work already done
-            do ind = 2, N_proc(direction)
-                pos = floor(p_posV_bis(ind,i1,i2))
-                weight(ind,i1,i2) = p_posV_bis(ind,i1,i2)-pos
-                select case(pos-pos_old)
-                case(0)
-                    ! The particle belongs to the same segment as the previous one
-                    Vm(ind,i1,i2)%pter => Vm(ind-1,i1,i2)%pter
-                    Vp(ind,i1,i2)%pter => Vp(ind-1,i1,i2)%pter
-                case(1)
-                    ! The particle follows the previous one
-                    Vm(ind,i1,i2)%pter => Vp(ind-1,i1,i2)%pter
-                    ! Vp = V(pos+1)
-                    proc_gap = floor(real(pos+1-1)/N_proc(direction)) ! Fortran -> indices start from 1
-                    if (rece_rank(proc_gap) == D_rank(direction)) then
-                        Vp(ind,i1,i2)%pter => p_V(pos+1-proc_gap*N_proc(direction), i1,i2)
-                    else
-                        Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                        pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-                    end if
-                case(2)
-                    ! pos = pos_old + 2, which corresponds to an "extension"
-                    ! Vm = V(pos)
-                    proc_gap = floor(real(pos-1)/N_proc(direction))
-                    if (rece_rank(proc_gap) == D_rank(direction)) then
-                        Vm(ind,i1,i2)%pter => p_V(pos-proc_gap*N_proc(direction), i1,i2)
-                    else
-                        Vm(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                        pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-                    end if
-                    ! Vp = V(pos+1)
-                    proc_gap = floor(real(pos+1-1)/N_proc(direction))
-                    if (rece_rank(proc_gap) == D_rank(direction)) then
-                        Vp(ind,i1,i2)%pter => p_V(pos+1-proc_gap*N_proc(direction), i1,i2)
-                    else
-                        Vp(ind,i1,i2)%pter => V_buffer(pos_in_buffer(proc_gap))
-                        pos_in_buffer(proc_gap) = pos_in_buffer(proc_gap) + 1
-                    end if
-                case default
-                    print*, "unexpected case : pos = ", pos, " , pos_old = ", pos_old, &
-                        & " ind = ", ind, " i1 = ", i1, " i2 = ", i2
-                end select
-                pos_old = pos
-            end do ! loop on the particle index inside the current line
-        end do ! loop on the first coordinate (i1) of a line inside the block of lines
-    end do ! loop on the second coordinate (i2) of a line inside the block of lines
-
-    deallocate(pos_in_buffer) ! We do not need it anymore
-
-    ! -- Compute the interpolated velocity --
-    ! Check that the communications are done
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        if (rece_rank(proc_gap)/=D_rank(direction)) then
-            call mpi_wait(rece_request(proc_gap), rece_status, ierr)
-        end if
-    end do
-    deallocate(rece_request)
-
-    ! Then compute the field
-    do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            do ind = 1, N_proc(direction)
-                p_posV_bis(ind,i1,i2) = weight(ind,i1,i2)*Vp(ind,i1,i2)%pter + (1.-weight(ind,i1,i2))*Vm(ind,i1,i2)%pter
-            end do
-        end do
-    end do
-    p_V = p_posV_bis
-
-
-    ! ===== Free memory =====
-    ! -- Pointers --
-    do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            do ind = 1, N_proc(direction)
-                nullify(Vp(ind,i1,i2)%pter)
-                nullify(Vm(ind,i1,i2)%pter)
-            end do
-        end do
-    end do
-    ! -- MPI internal buffers for nonblocking communications --
-    do proc_gap = send_gap(1,1), send_gap(1,2)
-        if (send_rank(proc_gap) /= D_rank(direction)) then
-            call MPI_WAIT(s_request(proc_gap),rece_status,ierr)
-            deallocate(send_buffer(proc_gap)%pter)
-        end if
-    end do
-    deallocate(send_buffer)
-    deallocate(s_request)
-    ! -- Deallocate dynamic arrays --
-    deallocate(V_buffer)
-    deallocate(rece_rank)
-    deallocate(send_rank)
-
-end subroutine AC_velocity_interpol_group
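-
-! [Editorial note - illustrative sketch, not part of the original file] Stripped of
-! the communication machinery, the RK2 (middle point) evaluation performed by the
-! routine above reduces, per particle, to the two stages described in its header:
-!    p_posV_bis = p_pos_adim + (dt/2.0)*p_V/d_sc(direction)      ! predictor: half step
-!    p_V        = <linear interpolation of V at p_posV_bis>      ! corrector velocity
-! the caller then advances the particles with this middle-point velocity.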
-
-
-!> Determine the set of processes which will send me information during the velocity interpolation, and compute
-!! for each of these processes the range of wanted data.
-!! @param[in]  direction    = current direction (1 = along X, 2 = along Y, 3 = along Z)
-!! @param[in]  gs           = size of a group (i.e. number of lines it gathers along the two other directions)
-!! @param[in]  ind_group    = coordinate of the current group of lines
-!! @param[out] send_gap     = gap between my coordinate and the processes of minimal coordinate which will receive information from me
-!! @param[in]  rece_gap     = gap between my coordinate and the processes of maximal coordinate which will send information to me
-!! @param[in]  rece_gap_abs = min (resp. max) value of rece_gap(:,:,i) with i=1 (resp. 2)
-!! @param[out] cartography  = cartography(proc_gap) contains the set of line indices in the block for which the
-!!                            current process requires data from proc_gap, and for each of these lines the range
-!!                            of mesh points from where it requires the velocity values.
-!! @details
-!!    Work on a group of lines of size gs(1) x gs(2).
-!!    Obtain the list of processes which need a part of my local velocity field
-!!    to interpolate the velocity used in the RK2 scheme to advect their particles.
-!!    At the same time, it computes, for each process from which I need a part of
-!!    the velocity field, the range of mesh points where I want data, and stores it
-!!    using sparse-matrix techniques (see the cartography defined in the
-!!    algorithm documentation).
-subroutine AC_velocity_determine_communication(direction, ind_group, gs, send_gap, &
-    & rece_gap, rece_gap_abs, rece_rank, cartography, max_size)
-! XXX Works only for periodic boundary conditions.
-
-    use cart_topology   ! info about mesh and mpi topology
-    use advec_variables ! contains info about solver parameters and others.
-    use mpi
-
-
-    ! Input/Output
-    integer, intent(in)                     :: direction
-    integer, dimension(2), intent(in)       :: ind_group
-    integer, dimension(2), intent(in)       :: gs
-    integer, dimension(gs(1), gs(2), 2), intent(in) :: rece_gap
-    integer, dimension(2), intent(in)       :: rece_gap_abs ! min (resp. max) value of rece_gap(:,:,i) with i=1 (resp. 2)
-    integer,dimension(rece_gap_abs(1):rece_gap_abs(2))&
-        &, intent(out)                      :: rece_rank ! rank of the processes which send me information
-    integer, dimension(2, 2), intent(out)   :: send_gap
-    type(int_1D_pter), dimension( &
-        & rece_gap_abs(1):rece_gap_abs(2)), intent(out) :: cartography
-    integer, intent(in)                     :: max_size
-    ! Others
-    integer                             :: proc_gap ! gap between a process coordinate (along the current
-                                                    ! direction) in the MPI topology and my coordinate
-    integer, dimension(gs(1), gs(2))    :: rece_gapP ! gap between the coordinate of the previous process (in the current direction)
-                                                     ! and the processes of maximal coordinate which will receive information from it
-    integer, dimension(gs(1), gs(2))    :: rece_gapN ! same as above but for the next process
-    integer                             :: rankP ! process rank for shift (P = previous, N = next)
-    integer                             :: send_request_gh ! MPI request of a nonblocking send
-    integer                             :: send_request_gh2 ! MPI request of a nonblocking send
-    integer                             :: ierr ! MPI error code
-    integer, dimension(2)               :: tag_table ! some MPI message tags
-    logical, dimension(:,:), allocatable:: test_request ! for MPI nonblocking communications
-    integer, dimension(:,:), allocatable:: send_request ! for MPI nonblocking sends
-    integer                             :: ind1, ind2 ! indices of the current line inside the group
-    integer,dimension(2)                :: rece_buffer ! buffer for the reception of rece_max
-    integer, dimension(:,:), allocatable:: first, last ! storage of the processes from which I will be the first (or the last) to receive
-    integer                             :: min_size ! first index in "first" and "last" used to store indices along the first dimension of the group of lines
-    integer                             :: gp_size ! group size
-    logical                             :: begin_interval ! are we at the start of an interval?
-    logical                             :: not_myself ! is the target process myself?
-    integer, dimension(MPI_STATUS_SIZE) :: statut
-
-    send_gap(1,1) = 3*N(direction)
-    send_gap(1,2) = -3*N(direction)
-    send_gap(2,:) = 0
-    gp_size = gs(1)*gs(2)
-
-    ! ===== Communicate with my neighbors -> obtain ghosts =====
-    ! Inform my neighbors about the processes from which I need information
-    tag_table = compute_tag(ind_group, tag_obtrec_ghost_NP, direction)
-    call mpi_ISsend(rece_gap(1,1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(1), &
-        & D_comm(direction), send_request_gh, ierr)
-    call mpi_ISsend(rece_gap(1,1,2), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(2), &
-        & D_comm(direction), send_request_gh2, ierr)
-    ! Receive the same messages from my neighbors
-    call mpi_recv(rece_gapN(1,1), gp_size, MPI_INTEGER, neighbors(direction,2), tag_table(1), D_comm(direction), statut, ierr)
-    call mpi_recv(rece_gapP(1,1), gp_size, MPI_INTEGER, neighbors(direction,1), tag_table(2), D_comm(direction), statut, ierr)
-
-    ! ===== Compute if I am first or last =====
-    min_size = 2 + gs(2)
-    ! Initialize "first" and "last" to determine if I am the first or the last process (considering the current direction)
-    ! to require information from this process
-    allocate(first(2,rece_gap_abs(1):rece_gap_abs(2)))
-    first(2,:) = 0 ! number of lines for which I am the first
-    allocate(last(2,rece_gap_abs(1):rece_gap_abs(2)))
-    last(2,:) = 0 ! number of lines for which I am the last
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        first(1,proc_gap) = -proc_gap
-        last(1,proc_gap) = -proc_gap
-    end do
-    ! Compute
-    do ind2 = 1, gs(2)
-        do ind1 = 1, gs(1)
-            ! proc_gap must belong to [rece_gap(ind1,ind2,1);rece_gap(ind1,ind2,2)].
-            ! I am the first if (proc_gap>rece_gapP(ind1,ind2)-1).
-            do proc_gap = rece_gapP(ind1,ind2), rece_gap(ind1,ind2,2)
-                first(2,proc_gap) = first(2,proc_gap)+1
-            end do
-            ! proc_gap must belong to [rece_gap(ind1,ind2,1);rece_gap(ind1,ind2,2)].
-            ! I am the last if (proc_gap<rece_gapN(ind1,ind2)+1).
-            do proc_gap = rece_gap(ind1,ind2,1), rece_gapN(ind1,ind2)
-                last(2,proc_gap) = last(2,proc_gap)+1
-            end do
-        end do
-    end do
-
-    ! ===== Determine the cartography =====
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        call mpi_cart_shift(D_comm(direction), 0, proc_gap, rankP, rece_rank(proc_gap), ierr)
-        !not_myself = (rece_rank(proc_gap) /= D_rank(direction)) ! is the target process myself?
-        ! Nothing to do if the target process is myself
-        !if (not_myself) then
-        if (rece_rank(proc_gap) /= D_rank(direction)) then
-            ! Allocate cartography(proc_gap)
-            allocate(cartography(proc_gap)%pter(max_size))
-            ! Initialize the cartography
-            cartography(proc_gap)%pter(1) = 0        ! number of velocity values to receive
-            cartography(proc_gap)%pter(2) = min_size ! number of elements to send when sending the cartography
-            ! Update the cartography
-            do ind2 = 1, gs(2)
-                cartography(proc_gap)%pter(2+ind2) = 0 ! 2 x number of intervals of concerned lines in column i2
-                begin_interval = .true.
-                do ind1 = 1, gs(1)
-                    ! Does proc_gap belong to [rece_gap(i1,i2,1);rece_gap(i1,i2,2)]?
-                    if((proc_gap>=rece_gap(ind1,ind2,1)).and.(proc_gap<=rece_gap(ind1,ind2,2))) then
-                        if (begin_interval) then
-                            cartography(proc_gap)%pter(2+ind2) = cartography(proc_gap)%pter(2+ind2)+2
-                            cartography(proc_gap)%pter(cartography(proc_gap)%pter(2)+1) = ind1
-                            cartography(proc_gap)%pter(2) = cartography(proc_gap)%pter(2) + 2
-                            cartography(proc_gap)%pter(cartography(proc_gap)%pter(2)) = ind1
-                            begin_interval = .false.
-                        else ! begin_interval
-                            cartography(proc_gap)%pter(cartography(proc_gap)%pter(2)) = ind1
-                        end if ! begin_interval
-                    else
-                        begin_interval = .true.
-                    end if ! rece_gap(1) <= proc_gap <= rece_gap(2)
-                end do ! ind1
-            end do ! ind2
-        else ! not myself
-            allocate(cartography(proc_gap)%pter(1))
-            cartography(proc_gap)%pter(1)=0
-        end if ! not myself
-    end do ! proc_gap
-
-    ! ===== Free the Isend buffers from the first communication =====
-    call MPI_WAIT(send_request_gh,statut,ierr)
-    call MPI_WAIT(send_request_gh2,statut,ierr)
-
-    ! ===== Send information about first and last =====
-    tag_table = compute_tag(ind_group, tag_obtrec_NP, direction)
-    allocate(send_request(rece_gap_abs(1):rece_gap_abs(2),2))
-    allocate(test_request(rece_gap_abs(1):rece_gap_abs(2),2))
-    test_request = .false.
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        ! Am I the first?
-        if (first(2,proc_gap)>0) then
-            if(rece_rank(proc_gap)/= D_rank(direction)) then
-                call mpi_ISsend(first(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag_table(1), D_comm(direction), &
-                    & send_request(proc_gap,1), ierr)
-                test_request(proc_gap,1) = .true.
-            else
-                send_gap(1,1) = min(send_gap(1,1), -proc_gap)
-                send_gap(2,1) = send_gap(2,1) + first(2,proc_gap)
-            end if
-        end if
-        ! Am I the last?
-        if (last(2,proc_gap)>0) then
-            if(rece_rank(proc_gap)/= D_rank(direction)) then
-                call mpi_ISsend(last(1,proc_gap), 2, MPI_INTEGER, rece_rank(proc_gap), tag_table(2), D_comm(direction), &
-                    & send_request(proc_gap,2), ierr)
-                test_request(proc_gap,2) = .true.
-            else
-                send_gap(1,2) = max(send_gap(1,2), -proc_gap)
-                send_gap(2,2) = send_gap(2,2) + last(2,proc_gap)
-            end if
-        end if
-    end do
-
-
-
-    ! ===== Receive information from the first and the last processes which need a part of my local velocity field =====
-    do while(send_gap(2,1) < gp_size)
-        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(1), D_comm(direction), statut, ierr)
-        send_gap(1,1) = min(send_gap(1,1), rece_buffer(1))
-        send_gap(2,1) = send_gap(2,1) + rece_buffer(2)
-    end do
-    do while(send_gap(2,2) < gp_size)
-        call mpi_recv(rece_buffer(1), 2, MPI_INTEGER, MPI_ANY_SOURCE, tag_table(2), D_comm(direction), statut, ierr)
-        send_gap(1,2) = max(send_gap(1,2), rece_buffer(1))
-        send_gap(2,2) = send_gap(2,2) + rece_buffer(2)
-    end do
-
-    ! ===== Free the Isend buffers =====
-    do proc_gap = rece_gap_abs(1), rece_gap_abs(2)
-        if (test_request(proc_gap,1).eqv. .true.) call MPI_WAIT(send_request(proc_gap,1),statut,ierr)
-        if (test_request(proc_gap,2)) call MPI_WAIT(send_request(proc_gap,2),statut,ierr)
-    end do
-    deallocate(send_request)
-    deallocate(test_request)
-
-    ! ===== Deallocate arrays =====
-    deallocate(first)
-    deallocate(last)
-
-end subroutine AC_velocity_determine_communication
-
-
-!> Interpolate the velocity field used in a RK2 scheme for particle advection -
-!! version for a direction with no domain subdivision, and thus no communication
-!! required.
-!! @param[in]     dt         = time step
-!! @param[in]     direction  = current direction (1 = along X, 2 = along Y and 3 = along Z)
-!! @param[in]     gs         = size of a group (i.e. number of lines it gathers along the two other directions)
-!! @param[in]     p_pos_adim = dimensionless particle position
-!! @param[in,out] p_V        = particle velocity (along the current direction)
-!! @details
-!!    A RK2 scheme is used to advect the particles: the middle point scheme. An
-!!    intermediate position "p_pos_bis(i) = p_pos(i) + V(i)*dt/2" is computed, and then
-!!    the numerical velocity of each particle is computed as the interpolation of V at
-!!    this point. This field is used to advect the particles at second order in time:
-!!    p_pos(t+dt, i) = p_pos(i) + p_V(i).
-!!    Variant for cases with no communication required.
-subroutine AC_velocity_interpol_no_com(dt, direction, gs, p_pos_adim, p_V)
-
-    ! This code involves a copy of p_V. It would be possible to use the 3D velocity field directly, but it would also limit the memory accesses.
-
-    use cart_topology   ! info about mesh and mpi topology
-    use advec_variables ! contains info about solver parameters and others.
-
-    ! Input/Output
-    real(WP), intent(in)                :: dt ! time step
-    integer, intent(in)                 :: direction ! current direction
-    integer, dimension(2),intent(in)    :: gs ! group size
-    real(WP), dimension(:,:,:), intent(in) :: p_pos_adim
-    real(WP), dimension(:,:,:), intent(inout) :: p_V
-    ! Others, local
-    real(WP),dimension(N_proc(direction),gs(1),gs(2)) :: p_posV_bis ! dimensionless position of the middle point
-    integer :: ind ! index
-    integer :: i1, i2 ! indices in the line group
-    integer :: pos ! index of the mesh point which precedes the particle position
-
-    ! ===== Initialisation =====
-    ! Compute the middle point
-    p_posV_bis = p_pos_adim + (dt/2.0)*p_V/d_sc(direction)
-
-
-    ! ===== Compute the interpolated velocity =====
-    ! -- Compute the interpolation weight and update the velocity directly in p_posV_bis --
-    do i2 = 1, gs(2)
-        do i1 = 1, gs(1)
-            do ind = 1, N(direction)
-
-                pos = floor(p_posV_bis(ind,i1,i2))
-                p_posV_bis(ind,i1,i2) = p_V(modulo(pos-1,N(direction))+1,i1,i2) + (p_posV_bis(ind,i1,i2)-pos)* &
-                    & (p_V(modulo(pos,N(direction))+1,i1,i2)-p_V(modulo(pos-1,N(direction))+1,i1,i2))
-
-            end do ! loop on the particle index (ind)
-        end do ! loop on the first coordinate (i1) of a line inside the block of lines
-    end do ! loop on the second coordinate (i2) of a line inside the block of lines
-
-    ! -- Copy the velocity back into the right array --
-    p_V = p_posV_bis
-
-end subroutine AC_velocity_interpol_no_com
-
-
-end module advec_common_velo
-!> @}
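-
-! [Editorial note - illustrative sketch, not part of the original file] The loop in
-! AC_velocity_interpol_no_com above is plain periodic linear interpolation. With
-! hypothetical values N(direction) = 8 and a middle-point position of 8.3:
-! pos = 8, and the velocity is read at the mesh points modulo(7,8)+1 = 8 and
-! modulo(8,8)+1 = 1, i.e. the stencil wraps around the domain and
-!    V_interp = V(8) + 0.3*(V(1) - V(8)).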