Commit b2f0e5dc authored by Jean-Baptiste Lagaert

[update] Update of the scales interface. Along the way, add the M'8 remeshing formula.

parent 0874ed4f
Showing with 4834 additions and 2323 deletions
......@@ -5,8 +5,8 @@
! MODULE: advec
!
!
! DESCRIPTION:
!> The module advec provides all public interfaces to solve an advection equation
!! with a particle method.
!
!> @details
......@@ -22,15 +22,15 @@
!------------------------------------------------------------------------------
module advec
use string
use precision_tools
use advec_abstract_proc
implicit none
! ===== Private variables =====
!> numerical method used to advect the scalar
character(len=str_short), private :: type_part_solv
!> dimensional splitting (e.g. classical, Strang or particle)
character(len=str_short), private :: dim_splitting
!> Group size along current direction
integer, private, dimension(2) :: gsX, gsY, gsZ
......@@ -46,9 +46,7 @@ module advec
public :: advec_step_Torder2 ! advec the scalar field during a time step.
! Remeshing formula
procedure(AC_remesh), pointer, private :: advecX_remesh => null()
procedure(AC_remesh), pointer, private :: advecY_remesh => null()
procedure(AC_remesh), pointer, private :: advecZ_remesh => null()
procedure(AC_remesh), pointer, private :: advec_remesh => null()
contains
......@@ -58,7 +56,7 @@ contains
!! @return type_part_solver = numerical method used for advection
function type_part_solver()
character(len=str_short) :: type_part_solver
type_part_solver = type_part_solv
end function
......@@ -70,15 +68,11 @@ end function
!! Strang splitting or particle splitting)
!! @param[in] verbosity = to display info about chosen remeshing formula (optional)
subroutine advec_init(order, stab_coeff, verbosity, dim_split)
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advecX ! solver for advection along X
use advecY ! solver for advection along Y
use advecZ ! solver for advection along Z
use advec_common ! some procedures common to advection along all directions
use advec_remesh_line ! contain "old" remeshing tools.
use advec_remeshing_formula ! contain "old" remeshing tools.
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advecX, only: advecX_remesh_init ! solver for advection along X
use advec_common ! some procedures common to advection along all directions
! Input/Output
character(len=*), optional, intent(in) :: order, dim_split
......@@ -88,7 +82,7 @@ subroutine advec_init(order, stab_coeff, verbosity, dim_split)
! Use default solver if it is not chosen by the user.
if(present(order)) then
type_part_solv = order
else
type_part_solv = 'p_O2'
end if
......@@ -104,7 +98,7 @@ subroutine advec_init(order, stab_coeff, verbosity, dim_split)
! Default dimensional splitting if the user does not choose it
if(present(dim_split)) then
dim_splitting = dim_split
else
dim_splitting = 'strang'
end if
......@@ -123,30 +117,26 @@ subroutine advec_init(order, stab_coeff, verbosity, dim_split)
! Call the right remeshing formula
select case(type_part_solv)
case('p_O2')
advecX_remesh => AC_remesh_lambda_group ! or Xremesh_O2
advecY_remesh => AC_remesh_lambda_group ! or Yremesh_O2
advecZ_remesh => AC_remesh_lambda_group ! or Zremesh_O2
advec_remesh => AC_remesh_lambda_group ! or Xremesh_O2
case('p_O4')
advecX_remesh => AC_remesh_lambda_group ! or Xremesh_O4
advecY_remesh => AC_remesh_lambda_group ! or Yremesh_O4
advecZ_remesh => AC_remesh_lambda_group ! or Zremesh_O4
advec_remesh => AC_remesh_lambda_group ! or Xremesh_O4
case('p_L2')
advec_remesh => AC_remesh_limit_lambda_group ! limited and corrected lambda 2
case('p_M6')
advecX_remesh => AC_remesh_Mprime6_group ! Xremesh_Mprime6
advecY_remesh => AC_remesh_Mprime6_group ! Yremesh_Mprime6
advecZ_remesh => AC_remesh_Mprime6_group ! Zremesh_Mprime6
advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime6
case('p_M8')
advec_remesh => AC_remesh_Mprime_group ! Xremesh_Mprime6
case default
advecX_remesh => AC_remesh_lambda_group ! or Xremesh_O2
advecY_remesh => AC_remesh_lambda_group ! or Yremesh_O2
advecZ_remesh => AC_remesh_lambda_group ! or Zremesh_O2
advec_remesh => AC_remesh_lambda_group ! or Xremesh_O2
end select
call AC_setup_init()
call advecX_remesh_init()
! Save group size
gsX = group_size(1,:)
gsY = group_size(2,:)
gsZ = group_size(3,:)
end subroutine advec_init
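For orientation, here is a minimal usage sketch of the public interface above (illustrative only: the program name and field arrays are placeholders, and the cart_topology setup and allocations are elided):

   program demo_advection
      use precision_tools
      use advec
      implicit none
      real(WP) :: dt, stab_coeff, v_max
      real(WP), dimension(:,:,:), allocatable :: Vx, Vy, Vz, scal

      ! ... initialise the MPI topology (cart_topology), allocate and fill Vx, Vy, Vz, scal ...

      ! Select the M'8 remeshing formula introduced by this commit.
      call advec_init(order='p_M8', stab_coeff=stab_coeff, verbosity=.true.)

      ! Respect the stability condition dt < stab_coeff/norm_inf(V).
      v_max = max(maxval(abs(Vx)), maxval(abs(Vy)), maxval(abs(Vz)))
      dt = 0.9_WP*stab_coeff/v_max

      ! One time step with second-order (Strang) dimensional splitting.
      call advec_step_Torder2(dt, Vx, Vy, Vz, scal)
   end program demo_advection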
......@@ -155,44 +145,36 @@ end subroutine advec_init
!> Solve advection equation - order 1 in time (classic dimensional splitting)
!! @param[in] dt = time step
!! @param[in] Vx = velocity along x (could be discretised on a bigger mesh than the scalar)
!! @param[in] Vy = velocity along y
!! @param[in] Vz = velocity along z
!! @param[in,out] scal = scalar field to advect
subroutine advec_step_Torder1(dt, Vx, Vy, Vz, scal)
use advecX ! Method to advec along X
use advecY ! Method to advec along Y
use advecZ ! Method to advec along Z
! Input/Output
real(WP), intent(in) :: dt
real(WP), dimension(:,:,:), intent(in) :: Vx, Vy, Vz
real(WP), dimension(:,:,:), intent(inout) :: scal
call advecX_calc_no_com(dt, Vx, scal)
call advecY_calc(dt, Vy, scal)
call advecZ_calc(dt, Vz, scal)
end subroutine advec_step_Torder1
!> Solve advection equation - order 2 in time (order 2 dimensional splitting)
!! @param[in] dt = time step
!! @param[in] Vx = velocity along x (could be discretised on a bigger mesh than the scalar)
!! @param[in] Vy = velocity along y
!! @param[in] Vz = velocity along z
!! @param[in,out] scal = scalar field to advect
subroutine advec_step_Torder2(dt, Vx, Vy, Vz, scal)
use advecX ! Method to advec along X
use advecY ! Method to advec along Y
use advecZ ! Method to advec along Z
! Input/Output
real(WP), intent(in) :: dt
real(WP), dimension(:,:,:), intent(in) :: Vx, Vy, Vz
real(WP), dimension(:,:,:), intent(inout) :: scal
call advecX_calc_no_com(dt/2.0, Vx, scal)
call advecY_calc(dt/2.0, Vy, scal)
call advecZ_calc(dt/2.0, Vz, scal)
......@@ -209,11 +191,10 @@ end subroutine advec_step_Torder2
!! @param[in,out] scal3D = scalar field to advect
subroutine advecX_calc(dt, Vx, scal3D)
use advecX ! Procedure specific to advection along X
use advecX, only : advecX_init_group ! Procedure specific to advection along X
use advec_common ! Some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advec_remesh_line
! Input/Output
real(WP), intent(in) :: dt
......@@ -254,7 +235,7 @@ subroutine advecX_calc(dt, Vx, scal3D)
p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
! ===== Remeshing =====
call advecX_remesh(direction, ind_group, gsX, p_pos_adim, p_V, j, k, scal3D, dt)
call advec_remesh(direction, ind_group, gsX, p_pos_adim, p_V, j, k, scal3D, dt)
end do
end do
......@@ -320,11 +301,10 @@ end subroutine advecX_calc_no_com
!! @param[in,out] scal3D = scalar field to advect
subroutine advecY_calc(dt, Vy, scal3D)
use advecY ! Procedure specific to advection along Y
use advecY, only : advecY_init ! Procedure specific to advection along Y
use advec_common ! Some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advec_remesh_line
! Input/Output
real(WP), intent(in) :: dt
......@@ -365,7 +345,7 @@ subroutine advecY_calc(dt, Vy, scal3D)
p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
! ===== Remeshing =====
call advecY_remesh(direction, ind_group, gsY, p_pos_adim, p_V, i, k, scal3D, dt)
call advec_remesh(direction, ind_group, gsY, p_pos_adim, p_V, i, k, scal3D, dt)
end do
end do
......@@ -382,11 +362,10 @@ end subroutine advecY_calc
!! @param[in,out] scal3D = scalar field to advect
subroutine advecZ_calc(dt, Vz, scal3D)
use advecZ, only : advecZ_init_group ! procedure devoted to advection along Z
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advecZ ! procedure devoted to advection along Z
use advec_common ! some procedures common to advection along all directions
use advec_remesh_line
! Input/Output
real(WP), intent(in) :: dt
......@@ -426,7 +405,7 @@ subroutine advecZ_calc(dt, Vz, scal3D)
p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
! ===== Remeshing =====
call advecZ_remesh(direction, ind_group, gsZ, p_pos_adim, p_V, i,j,scal3D, dt)
call advec_remesh(direction, ind_group, gsZ, p_pos_adim, p_V, i,j,scal3D, dt)
end do
end do
......
!> @addtogroup part
!! @{
!------------------------------------------------------------------------------
!
! MODULE: advec_Vect
!
!
! DESCRIPTION:
!> The module advec_Vect provides all public interfaces to solve an advection equation
!! with a particle method.
!
!> @details
!! This module contains the generic procedures to initialize and parametrise the
!! advection solver based on the particle method. It also contains the subroutine
!! "advec_step" which solves the equation for a given time step. It is the only
!! module which is supposed to be included by a code using this library of
!! particle methods.
!
!> @author
!! Jean-Baptiste Lagaert, LEGI
!
!------------------------------------------------------------------------------
module advec_Vect
use precision_tools
use advec_abstract_proc
implicit none
! ===== Private variables =====
!> numerical method used to advect the scalar
character(len=str_short), private :: type_part_solv
!> dimensional splitting (e.g. classical, Strang or particle)
character(len=str_short), private :: dim_splitting
!> Group size along current direction
integer, private, dimension(2) :: gsX, gsY, gsZ
! ===== Public procedures =====
! Scheme used to advec the scalar (order 2 or 4 ?)
! public :: type_part_solver
! Advection methods
! public :: advec_init ! initialize the scalar solver
public :: advec_step_Vect ! advec the scalar field during a time step.
! procedure(advec_step_Torder2), pointer, public :: advec_step => null()
! public :: advec_step_Torder1 ! advec the scalar field during a time step.
! public :: advec_step_Torder2 ! advec the scalar field during a time step.
!
! Remeshing formula
procedure(AC_remesh), pointer, private :: advec_remesh_bis => null()
contains
! ===== Public methods =====
!> Return the name of the particle method used for the advection
!! @return type_part_solver = numerical method used for advection
function type_part_solver()
character(len=str_short) :: type_part_solver
type_part_solver = type_part_solv
end function
!> Initialise the particle advection methods
!! @param[in] order = to choose the remeshing method (and thus the order)
!! @param[out] stab_coeff = stability coefficient (the stability condition is
!! dt < stab_coeff/norm_inf(V))
!! @param[in] dim_split = dimensional splitting (e.g. classical,
!! Strang splitting or particle splitting)
!! @param[in] verbosity = to display info about chosen remeshing formula (optional)
subroutine advec_init_Vect(order, stab_coeff, verbosity, dim_split)
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advecX ! solver for advection along X
use advecY ! solver for advection along Y
use advecZ ! solver for advection along Z
use advec_common ! some procedures common to advection along all directions
! Input/Output
character(len=*), optional, intent(in) :: order, dim_split
logical, optional, intent(in) :: verbosity
real(WP), optional, intent(out) :: stab_coeff
! Use default solver if it is not chosen by the user.
if(present(order)) then
type_part_solv = order
else
type_part_solv = 'p_O2'
end if
! Initialize the solver
if (present(verbosity)) then
call AC_solver_init(type_part_solv, verbosity)
else
call AC_solver_init(type_part_solv)
end if
if (present(stab_coeff)) stab_coeff = 1.0/(dble(bl_size))
! Call the right remeshing formula
select case(type_part_solv)
case('p_O2')
advec_remesh_bis => AC_remesh_lambda_group
case('p_O4')
advec_remesh_bis => AC_remesh_lambda_group
case('p_L2')
advec_remesh_bis => AC_remesh_limit_lambda_group
case('p_M6')
advec_remesh_bis => AC_remesh_Mprime_group
case('p_M8')
advec_remesh_bis => AC_remesh_Mprime_group
case default
advec_remesh_bis => AC_remesh_lambda_group
end select
call AC_setup_init()
! Save group size
gsX =group_size(1,:)
gsY =group_size(2,:)
gsZ =group_size(3,:)
end subroutine advec_init_Vect
!> Solve advection equation - order 2 in time (order 2 dimensional splitting)
!! @param[in] dt = time step
!! @param[in] Vx = velocity along x (could be discretised on a bigger mesh than the scalar)
!! @param[in] Vy = velocity along y
!! @param[in] Vz = velocity along z
!! @param[in,out] scal_Vector = array of scalar fields to advect
subroutine advec_step_Vect(dt, Vx, Vy, Vz, scal_Vector)
use advecX ! Method to advec along X
use advecY ! Method to advec along Y
use advecZ ! Method to advec along Z
! Input/Output
real(WP), intent(in) :: dt
real(WP), dimension(:,:,:), intent(in) :: Vx, Vy, Vz
real(WP), dimension(:,:,:,:), intent(inout) :: scal_Vector
call advecX_calc_Vect(dt/2.0, Vx, scal_Vector)
call advecY_calc_Vect(dt/2.0, Vy, scal_Vector)
call advecZ_calc_Vect(dt/2.0, Vz, scal_Vector)
call advecZ_calc_Vect(dt/2.0, Vz, scal_Vector)
call advecY_calc_Vect(dt/2.0, Vy, scal_Vector)
call advecX_calc_Vect(dt/2.0, Vx, scal_Vector)
end subroutine advec_step_Vect
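The call sequence above is the standard symmetric (Strang) composition of the three one-dimensional advections, each applied over half a time step:

   S(dt) ~ S_X(dt/2) o S_Y(dt/2) o S_Z(dt/2) o S_Z(dt/2) o S_Y(dt/2) o S_X(dt/2)

which is formally second order in time, whereas the first-order splitting (advec_step_Torder1 in module advec) applies S_X(dt), S_Y(dt) and S_Z(dt) once each with the full time step.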
!> Scalar advection along X (this procedure calls the right solver, depending on the simulation setup)
!! @param[in] dt = time step
!! @param[in] Vx = velocity along X (could be discretised on a bigger mesh than the scalar)
!! @param[in,out] scal_vect = array of scalar fields to advect
subroutine advecX_calc_Vect(dt, Vx, scal_vect)
use advecX ! Procedure specific to advection along X
use advec_common ! Some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
! Input/Output
real(WP), intent(in) :: dt
real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in) :: Vx
real(WP), dimension(:,:,:,:), intent(inout) :: scal_vect
! Other local variables
integer, parameter :: direction =1 ! current direction
integer :: j,k ! index of the current mesh point
integer :: sca ! index of the current scalar field
integer, dimension(2) :: ind_group ! index of the current group of lines (=(j,k) by default)
real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_pos_adim ! adimensionned particles position
real(WP),dimension(N_proc(direction),gsX(1),gsX(2)) :: p_V ! particles velocity
! Allocate send_ind_min/max
if(allocated(send_group_min)) deallocate(send_group_min)
allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
if(allocated(send_group_max)) deallocate(send_group_max)
allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
! Initialise the pointer for optimized remeshing
call AC_setup_alongX()
ind_group = 0
do k = 1, N_proc(3), gsX(2)
ind_group(2) = ind_group(2) + 1
ind_group(1) = 0
do j = 1, N_proc(2), gsX(1)
ind_group(1) = ind_group(1) + 1
! ===== Init particles =====
call advecX_init_group(Vx, j, k, gsX, p_pos_adim, p_V)
! ===== Advection =====
! -- Compute velocity (with a RK2 scheme) --
call AC_velocity_interpol_group(dt, direction, gsX, ind_group, p_pos_adim, p_V)
! -- Advec particles --
p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
! ===== Remeshing =====
do sca = 1, size(scal_vect,4)
call advec_remesh_bis(direction, ind_group, gsX, &
& p_pos_adim, p_V, j, k, scal_Vect(:,:,:,sca), dt)
end do
end do
end do
end subroutine advecX_calc_Vect
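A remark on the advection line above: positions are handled in non-dimensional mesh units, so dt*p_V/d_sc(direction) is the particle displacement expressed in number of cells, i.e. the local lambda = V*dt/dx used elsewhere for the block-type computation. For instance, a particle initialised on grid node i with dt*V/dx = 2.3 ends up at adimensioned position i + 2.3 and is remeshed onto the surrounding grid points by the formula bound to advec_remesh_bis.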
!> Scalar advection along Y (this procedure calls the right solver, depending on the simulation setup)
!! @param[in] dt = time step
!! @param[in] Vy = velocity along y (could be discretised on a bigger mesh than the scalar)
!! @param[in,out] scal_vect = array of scalar fields to advect
subroutine advecY_calc_Vect(dt, Vy, scal_vect)
use advecY ! Procedure specific to advection along Y
use advec_common ! Some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
! Input/Output
real(WP), intent(in) :: dt
real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in) :: Vy
real(WP), dimension(:,:,:,:), intent(inout) :: scal_vect
! Other local variables
integer, parameter :: direction =2 ! current direction
integer :: i,k ! index of the current mesh point
integer :: sca ! index of the current scalar field
integer, dimension(2) :: ind_group ! index of the current group of lines (=(i,k) by default)
real(WP),dimension(N_proc(direction),gsY(1),gsY(2)) :: p_pos_adim ! adimensionned particles position
real(WP),dimension(N_proc(direction),gsY(1),gsY(2)) :: p_V ! particles velocity
! Allocate send_ind_min/max
if(allocated(send_group_min)) deallocate(send_group_min)
allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
if(allocated(send_group_max)) deallocate(send_group_max)
allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
! Initialise the pointer for optimized remeshing
call AC_setup_alongY()
ind_group = 0
do k = 1, N_proc(3), gsY(2)
ind_group(2) = ind_group(2) + 1
ind_group(1) = 0
do i = 1, N_proc(1), gsY(1)
ind_group(1) = ind_group(1) + 1
! ===== Init particles =====
call advecY_init(Vy, i, k, gsY, p_pos_adim, p_V)
! ===== Advection =====
! -- Compute velocity (with a RK2 scheme) --
call AC_velocity_interpol_group(dt, direction, gsY, ind_group, p_pos_adim, p_V)
! -- Advec particles --
p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
! ===== Remeshing =====
do sca = 1, size(scal_vect,4)
call advec_remesh_bis(direction, ind_group, gsY, p_pos_adim, p_V, i, k, scal_vect(:,:,:,sca), dt)
end do
end do
end do
end subroutine advecY_calc_Vect
!> Scalar advection along Z (this procedure calls the right solver, depending on the simulation setup)
!! @param[in] dt = time step
!! @param[in] Vz = velocity along z (could be discretised on a bigger mesh than the scalar)
!! @param[in,out] scal_vect = array of scalar fields to advect
subroutine advecZ_calc_Vect(dt, Vz, scal_vect)
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advecZ ! procedure devoted to advection along Z
use advec_common ! some procedures common to advection along all directions
! Input/Output
real(WP), intent(in) :: dt
real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(in) :: Vz
real(WP), dimension(:,:,:,:), intent(inout) :: scal_vect
! Other local variables
integer, parameter :: direction =3 ! current direction
integer :: i,j ! index of the current mesh point
integer :: sca ! index of the current scalar field
integer, dimension(2) :: ind_group ! index of the current group of lines (=(i,j) by default)
real(WP), dimension(N_proc(direction),gsZ(1),gsZ(2)) :: p_pos_adim ! adimensionned particles position
real(WP), dimension(N_proc(direction),gsZ(1),gsZ(2)) :: p_V ! particles velocity
! Allocate send_ind_min/max
if(allocated(send_group_min)) deallocate(send_group_min)
allocate(send_group_min(group_size(direction,1),group_size(direction,2)))
if(allocated(send_group_max)) deallocate(send_group_max)
allocate(send_group_max(group_size(direction,1),group_size(direction,2)))
! Initialise the pointer for optimized remeshing
call AC_setup_alongZ()
ind_group = 0
do j = 1, N_proc(2), gsZ(2)
ind_group(2) = ind_group(2) + 1
ind_group(1) = 0
do i = 1, N_proc(1), gsZ(1)
ind_group(1) = ind_group(1) + 1
! ===== Init particles =====
call advecZ_init_group(Vz, i, j, gsZ, p_pos_adim, p_V)
! ===== Advection =====
! -- Compute velocity (with a RK2 scheme) --
call AC_velocity_interpol_group(dt, direction, gsZ, ind_group, p_pos_adim, p_V)
! -- Advec particles --
p_pos_adim = p_pos_adim + dt*p_V/d_sc(direction)
! ===== Remeshing =====
do sca = 1, size(scal_vect,4)
call advec_remesh_bis(direction, ind_group, gsZ, p_pos_adim, p_V, i,j,scal_vect(:,:,:,sca), dt)
end do
end do
end do
end subroutine advecZ_calc_Vect
!> ===== Private procedure =====
end module advec_Vect
!> @}
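Compared with module advec, this vector variant advances several scalar fields with the same particles: the velocity interpolation and the particle displacement are computed once per group of lines, and only the remeshing loop runs once per scalar. A minimal usage sketch (names illustrative; topology setup and allocations elided, and assuming the module's default public accessibility for advec_init_Vect):

   use precision_tools
   use advec_Vect
   real(WP) :: dt, stab_coeff
   real(WP), dimension(:,:,:), allocatable :: Vx, Vy, Vz
   real(WP), dimension(:,:,:,:), allocatable :: scal_vect   ! last index = scalar number

   ! ... cart_topology setup, allocation and initialisation ...
   call advec_init_Vect(order='p_L2', stab_coeff=stab_coeff)
   call advec_step_Vect(dt, Vx, Vy, Vz, scal_vect)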
......@@ -5,12 +5,12 @@
! MODULE: advec_correction
!
!
! DESCRIPTION:
!> The module ``advec_correction'' gathers functions and subroutines used to compute
!! corrections or limitators when needed. These tools are
!! independent of the direction.
!! @details
!! This module gathers functions and routines used to determine when corrections
!! are required depending on the remeshing formula. It includes particle
!! type and tag computation (for corrected lambda schemes) and slope variation computation
!! for the limitator.
......@@ -31,15 +31,11 @@
module advec_correction
use precision
use string
use advec_abstract_proc
use advec_remeshing_formula
implicit none
!----- Determine block type and tag particles -----
public :: AC_type_and_block_group
public :: AC_limitator_from_slopes
contains
......@@ -47,7 +43,7 @@ contains
! ==================== Bloc type and particles tag for corrected lambda schemes ====================
! ===========================================================================================================
!> Determine type (center or left) of each block and tag particles for a complete group of
!! lines where corrected remeshing formulas are required.
!! @param[in] dt = time step
......@@ -56,7 +52,7 @@ contains
!! @param[in] ind_group = coordinate of the current group of lines
!! @param[in] p_V = particle velocity (along the current direction)
!! @param[out] bl_type = table of blocks type (center of left)
!! @param[out] bl_tag = inform about tagged particles (bl_tag(ind_bl)=1 if the end of the bl_ind-th block
!! and the beginning of the following one is tagged)
!! @details
!! This subroutine works on a group of lines. For each line of this group, it
......@@ -70,7 +66,7 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
use mpi
use cart_topology ! info about mesh and mpi topology
use advec_variables ! contains info about solver parameters and others.
use precision ! define working precision (double or simple)
use precision_tools ! define working precision (double or single)
real(WP), intent(in) :: dt ! time step
integer, intent(in) :: dir
......@@ -80,12 +76,12 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
logical,dimension(bl_nb(dir)+1,gp_s(1),gp_s(2)),intent(out) :: bl_type ! is the particle block a center block or a left one ?
logical,dimension(bl_nb(dir),gp_s(1),gp_s(2)),intent(out) :: bl_tag ! indice of tagged particles
real(WP),dimension(bl_nb(dir)+1,gp_s(1),gp_s(2)) :: bl_lambdaMin ! for a particle, lambda = V*dt/dx ; bl_lambdaMin = min of
! lambda on a block (take also into account first following particle)
real(WP),dimension(gp_s(1),gp_s(2)) :: lambP, lambN ! buffer to exchange some lambda min with other processus
real(WP),dimension(gp_s(1),gp_s(2)) :: lambB, lambE ! min value of lambda of the begin of the line and at the end of the line
integer, dimension(bl_nb(dir)+1,gp_s(1),gp_s(2)) :: bl_ind ! block index : integer as lambda in (bl_ind,bl_ind+1) for a left block
! and lambda in (bl_ind-1/2, bl_ind+1/2) for a right block
integer :: ind,i_p ! some indices
real(WP) :: cfl ! = d_sc
integer, dimension(2) :: send_request ! mpi status of nonblocking send
......@@ -105,14 +101,14 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
! Receive ghost value, ie value from neighbors boundaries.
tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)
call mpi_Irecv(lambN(1,1), com_size, MPI_DOUBLE_PRECISION, &
& neighbors(dir,2), tag_table(1), D_comm(dir), rece_request(1), ierr)
call mpi_Irecv(lambP(1,1), com_size, MPI_DOUBLE_PRECISION, &
& neighbors(dir,1), tag_table(2), D_comm(dir), rece_request(2), ierr)
! -- For the first block (1/2) --
! The domain contains only its second half => exchange ghost with the previous processus
lambB = minval(p_V(1:(bl_size/2)+1,:,:),1)*cfl
tag_table = compute_tag(ind_group, tag_part_tag_NP, dir)
!tag_table = compute_tag(ind_group, tag_part_tag_NP, dir) ! Tag table is already equal to this.
! Send message
call mpi_ISsend(lambB(1,1), com_size, MPI_DOUBLE_PRECISION, &
& neighbors(dir,1), tag_table(1), D_comm(dir), send_request(1), ierr)
......@@ -149,7 +145,6 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
bl_type = (bl_lambdaMin<dble(bl_ind))
! ===== Tag particles =====
do ind = 1, bl_nb(dir)
bl_tag(ind,:,:) = ((bl_ind(ind,:,:)/=bl_ind(ind+1,:,:)) .and. &
& (bl_type(ind,:,:).neqv.bl_type(ind+1,:,:)))
......@@ -161,6 +156,109 @@ subroutine AC_type_and_block_group(dt, dir, gp_s, ind_group, p_V, &
end subroutine AC_type_and_block_group
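In other words, the loop above tags the transition between blocks ind and ind+1 (bl_tag = .true.) exactly when the two blocks would shift by a different integer number of cells (bl_ind differs) while being of different types (one centered, one left): this is the configuration in which the plain lambda remeshing needs corrected weights around the block interface.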
!> Compute a limitator function from scalar slope - only for corrected lambda 2 formula.
!! @param[in] gp_s = size of a group (ie number of line it gathers along the two other directions)
!! @param[in] ind_group = coordinate of the current group of lines
!! @param[in] p_pos = particles position
!! @param[in] scalar = scalar advected by particles
!! @param[out] limit = limitator function
!! @details
!! This subroutine works on a group of lines. For each line of this group, it
!! computes a limitator function from the scalar slopes, exchanging the needed
!! boundary values with the neighbouring MPI processes.
!! Note that the subroutine actually computes limitator/8, as this is the
!! expression used inside the remeshing formula; computing it directly
!! minimizes the number of operations.
subroutine AC_limitator_from_slopes(direction, gp_s, p_pos, &
& deltaS, limit, tag_mpi, com_size)
use mpi
use cart_topology ! info about mesh and mpi topology
use advec_variables ! contains info about solver parameters and others.
use precision_tools ! define working precision_tools (double or simple)
integer :: direction ! current direction
integer, dimension(2),intent(in) :: gp_s ! group size
real(WP), dimension(:,:,:), intent(in) :: p_pos ! particle position
real(WP), dimension(:,:,:), intent(in) :: deltaS ! scalar slope: scalar(i+1)-scalar(i) - for i=1 N_proc+1
real(WP), dimension(:,:,:), intent(out) :: limit ! limitator function
integer, intent(in) :: tag_mpi ! tag for mpi message
integer, intent(in) :: com_size ! size of mpi message
! Local variables
real(WP),dimension(2,gp_s(1),gp_s(2)) :: Sbuffer, Rbuffer ! buffer to exchange scalar or limitator at boundaries with neighbors.
integer :: ind ! loop indice on particle indice
real(WP),dimension(gp_s(1),gp_s(2)) :: afl ! = cfl - [cfl] where [] denotes the nearest int.
! integer,dimension(gp_s(1),gp_s(2)) :: afl_sign ! = sign of afl, ie 1 if afl>=0, -1 if afl<0
integer :: send_request ! mpi status of nonblocking send
integer, dimension(MPI_STATUS_SIZE) :: rece_status ! mpi status (for mpi_wait)
integer, dimension(MPI_STATUS_SIZE) :: send_status ! mpi status (for mpi_wait)
integer :: ierr ! mpi error code
! ===== Compute slope and limitator =====
! Van Leer limitator function (limit = limitator/8)
! -- For the "middle" and the "last" block --
do ind = 2, N_proc(direction)
where(deltaS(:,:,ind)/=0)
afl = p_pos(ind,:,:)
afl = afl - nint(afl)
! afl_sign = int(sign(1._WP,afl))
! limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl_sign*afl+0.5_WP)**2)*(deltaS(:,:,ind-afl_sign)/deltaS(:,:,ind))/(1+(deltaS(:,:,ind-afl_sign)/deltaS(:,:,ind)))
! If (p_pos-nint(p_pos))>=0)
where(afl>=0)
limit(ind+1,:,:) = max(0._WP,(deltaS(:,:,ind-1)/deltaS(:,:,ind)))
limit(ind+1,:,:) = limit(ind+1,:,:)/(limit(ind+1,:,:)+1)
limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl+0.5_WP)**2)*limit(ind+1,:,:)
elsewhere
limit(ind+1,:,:) = max(0._WP,(deltaS(:,:,ind+1)/deltaS(:,:,ind)))
limit(ind+1,:,:) = limit(ind+1,:,:)/(limit(ind+1,:,:)+1)
limit(ind+1,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl-0.5_WP)**2)*limit(ind+1,:,:)
end where
elsewhere
limit(ind+1,:,:) = 0.0_WP
end where
end do
! -- For the "first" block --
! 1 - limit(1) - limitator at 1/2 is already computed on the previous mpi-rank (limit(N_proc+1) !)
! 2 - limit(2) - limitator at 1+1/2 requires deltaS(0) = scalar slope between scalar(0) and scalar(-1), which is already computed on the previous rank
! Send these values
Sbuffer(1,:,:) = limit(N_proc(direction)+1,:,:)
Sbuffer(2,:,:) = deltaS(:,:,N_proc(direction))
call mpi_ISsend(Sbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
& neighbors(direction,2), tag_mpi, D_comm(direction), send_request, ierr)
! Receive it !
call mpi_recv(Rbuffer(1,1,1), com_size, MPI_DOUBLE_PRECISION, &
& neighbors(direction,1), tag_mpi, D_comm(direction),rece_status, ierr)
! Get limit(1) = limitator at 1/2
limit(1,:,:) = Rbuffer(1,:,:)
! Get limit(2) = limitator at 1+1/2
where(deltaS(:,:,1)/=0)
afl = p_pos(1,:,:)
afl = afl - nint(afl)
! If (p_pos-nint(p_pos))>=0)
where(afl>=0)
limit(2,:,:) = max(0._WP,(Rbuffer(2,:,:)/deltaS(:,:,1)))
! = ( deltaS(:,:,0)/deltaS(:,:,1))
limit(2,:,:) = limit(2,:,:)/(1+limit(2,:,:))
limit(2,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl+0.5_WP)**2)*limit(2,:,:)
elsewhere
limit(2,:,:) = max(0._WP,(deltaS(:,:,2)/deltaS(:,:,1)))
limit(2,:,:) = limit(2,:,:)/(1+limit(2,:,:))
limit(2,:,:) = (4.0_WP/8._WP)*min(0.9_WP,(afl-0.5_WP)**2)*limit(2,:,:)
end where
elsewhere
limit(2,:,:) = 0.0_WP
end where
! Classical (corrected) lambda formula: limitator function = 1
! limit = 1._WP/8._WP
! ===== Close mpi_ISsend when done =====
call mpi_wait(send_request, send_status, ierr)
end subroutine AC_limitator_from_slopes
end module advec_correction
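For reference, the per-point computation performed in both branches above reduces to the following standalone helper (an illustrative sketch, not part of the module; the argument names are invented, and the caller is still responsible for setting the result to zero when the local slope deltaS vanishes):

   pure function limitator_over_8(afl, slope_ratio) result(lim)
      ! afl = p_pos - nint(p_pos); slope_ratio = deltaS(upwind)/deltaS(local), with the
      ! upwind slope taken at ind-1 when afl >= 0 and at ind+1 otherwise.
      use precision_tools
      implicit none
      real(WP), intent(in) :: afl, slope_ratio
      real(WP) :: lim
      real(WP) :: r
      r = max(0._WP, slope_ratio)   ! Van Leer type: only positive slope ratios contribute
      lim = (4.0_WP/8._WP)*min(0.9_WP, (abs(afl)+0.5_WP)**2)*r/(1._WP+r)
   end function limitator_over_8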
......@@ -5,19 +5,19 @@
! MODULE: advecX_line
!
!
! DESCRIPTION:
!> The module advecX_line is devoted to the simplest implementation of
!! advection along X axis of a scalar field.
!
!> @details
!> The module advecX_line is devoted to the simplest implementation of
!! advection along X axis of a scalar field. It is an unoptimized
!! version, useful to understand the basis and to benchmark the
!! optimisation done.
!! It uses a particle method and provides a parallel implementation.
!!
!! This module can use the method and variables defined in the module
!! "advec_common" which gather information and tools shared for advection along
!! "advec_common_line" which gather information and tools shared for advection along
!! x, y and z-axis.
!!
!! The module "test_advec" can be used in order to validate the procedures
......@@ -30,7 +30,7 @@
module advecX_line
use precision
use precision_tools
use advec_abstract_proc
implicit none
......@@ -47,9 +47,9 @@ module advecX_line
! ===== Private variable ====
! particles solver with different remeshing formula
integer, dimension(2), private :: gpX_size
!> Current direction = along X
integer, private, parameter :: direction=1
!> Group size along current direction
!integer, private, dimension(2) :: gs
......@@ -71,9 +71,9 @@ contains
!! @param[in,out] scal3D = scalar field to advect
subroutine advecX_calc_line(dt,Vx,scal3D)
use advec_common ! Some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
use advec_common_line ! Some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
! Input/Output
real(WP), intent(in) :: dt
......@@ -136,10 +136,10 @@ end subroutine advecX_calc_line
!! @param[in,out] scal = scalar field to advect
subroutine Xremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
use advec_common ! Some procedures common to advection along all directions
use advec_common_line ! Some procedures common to advection along all directions
use advec_remeshing_line ! Remeshing formula
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
! Input/Output
integer, dimension(2), intent(in) :: ind_group
......@@ -148,14 +148,14 @@ subroutine Xremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
logical, dimension(:), intent(in) :: bl_tag
real(WP), dimension(:), intent(in) :: p_pos_adim
real(WP), dimension(N_proc(1), N_proc(2), N_proc(3)), intent(inout) :: scal
! Other local variables
! Variable used to remesh particles in a buffer
real(WP),dimension(:),allocatable :: send_buffer ! buffer used to remesh the scalar before sending it to the right subdomain
integer, dimension(2) :: rece_proc ! minimal and maximal gap between my Y-coordinate and the one from which
! I will receive data
integer :: proc_min ! smaller gap between me and the processes to where I send data
integer :: proc_max ! biggest gap between me and the processes to where I send data
! -- Compute ranges --
if (bl_type(1)) then
......@@ -182,7 +182,7 @@ subroutine Xremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,j,k,scal)
! -- Remesh the particles in the buffer --
call AC_remesh_lambda2corrected_basic(direction, p_pos_adim, scal(:,j,k), bl_type, bl_tag, send_j_min, send_j_max, send_buffer)
! -- Send the buffer to the matching processus and update the scalar field --
scal(:,j,k) = 0
call AC_bufferToScalar_line(direction, ind_group , send_j_min, send_j_max, proc_min, proc_max, &
......@@ -203,7 +203,7 @@ end subroutine Xremesh_O2_line
!! @param[in] j = Y-indice of the current line
!! @param[in] k = Z-indice of the current line
!! @param[out] p_pos_adim = adimensioned particles postion
!! @param[out] p_V = particle velocity
subroutine advecX_init_line(Vx, j, k, p_pos_adim, p_V)
use cart_topology ! Description of mesh and of mpi topology
......
......@@ -6,19 +6,19 @@
! MODULE: advecY_line
!
!
! DESCRIPTION:
!> The module advecY_line is devoted to the simplest implementation of
!! advection along Y axis of a scalar field.
!
!> @details
!> The module advecY_line is devoted to the simplest implementation of
!! advection along Y axis of a scalar field. It is an unoptimized
!! version, useful to understand the basis and to benchmark the
!! optimisation done.
!! It uses a particle method and provides a parallel implementation.
!!
!! This module can use the method and variables defined in the module
!! "advec_common" which gather information and tools shared for advection along
!! "advec_common_line" which gather information and tools shared for advection along
!! x, y and z-axis.
!!
!! The module "test_advec" can be used in order to validate the procedures
......@@ -31,7 +31,7 @@
module advecY_line
use precision
use precision_tools
use advec_abstract_proc
implicit none
......@@ -68,8 +68,8 @@ contains
!! @param[in,out] scal3D = scalar field to advect
subroutine advecY_calc_line(dt,Vy,scal3D)
use advec_common ! some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use advec_common_line ! some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! description of mesh and of mpi topology
! input/output
......@@ -131,10 +131,10 @@ end subroutine advecY_calc_line
!! @param[in,out] scal = scalar field to advect
subroutine Yremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,i,k,scal)
use advec_common ! Some procedures common to advection along all directions
use advec_common_line ! Some procedures common to advection along all directions
use advec_remeshing_line ! Remeshing formula
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
! Input/Output
integer, dimension(2), intent(in) :: ind_group
......
......@@ -18,7 +18,7 @@
!! It uses a particle method and provides a parallel implementation.
!!
!! This module can use the method and variables defined in the module
!! "advec_common" which gather information and tools shared for advection along
!! "advec_common_line" which gather information and tools shared for advection along
!! x, y and z-axis.
!!
!! The module "test_advec" can be used in order to validate the procedures
......@@ -31,7 +31,7 @@
module advecZ_line
use precision
use precision_tools
use advec_abstract_proc
implicit none
......@@ -66,7 +66,7 @@ contains
!! @param[in,out] scal3D = scalar field to advect
subroutine advecZ_calc_line(dt,Vz,scal3D)
use advec_common ! some procedures common to advection along all directions
use advec_common_line ! some procedures common to advection along all directions
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
......@@ -129,10 +129,10 @@ end subroutine advecZ_calc_line
!! @param[in,out] scal = scalar field to advect
subroutine Zremesh_O2_line(ind_group, p_pos_adim, bl_type, bl_tag,i,j,scal)
use advec_common ! Some procedures common to advection along all directions
use advec_common_line ! Some procedures common to advection along all directions
use advec_remeshing_line ! Remeshing formula
use advec_variables ! contains info about solver parameters and others.
use cart_topology ! Description of mesh and of mpi topology
! Input/Output
integer, dimension(2), intent(in) :: ind_group
......
......@@ -5,19 +5,19 @@
! MODULE: advec_common_line
!
!
! DESCRIPTION:
!> The module ``advec_common_line'' gathers functions and subroutines used to advect a scalar
!! which are not specific to a direction. It contains some ``old''
!! functions from ``advec_common'' which are not optimized.
!! @details
!! This module gathers functions and routines used to advect a scalar which are not
!! specific to a direction. More precisely, it provides functions similar to
!! ``advec_common'' but which only work on a single line rather than on a
!! group of lines. Considering how mpi parallelism works, working on single
!! lines is not optimal. Therefore, these functions are only here for
!! debugging and testing purposes. They can also be used to compute
!! some speed-ups. They are simpler and more basic but less efficient.
!!
!! This module is automatically loaded when advec_common is used.
!! Moreover, advec_common contains all the interfaces to automatically use
!! the right function whenever you want to work on a single line or on a group of
......@@ -35,11 +35,11 @@
!! Jean-Baptiste Lagaert, LEGI
!
!------------------------------------------------------------------------------
module advec_common_line
use precision
use string
use precision_tools
use structure_tools
implicit none
......@@ -447,7 +447,7 @@ subroutine AC_type_and_block_line(dt, direction, ind_group, p_V, &
use mpi
use cart_topology
use advec_variables
use precision
use precision_tools
! In/Out variables
real(WP), intent(in) :: dt ! time step
......
......@@ -5,13 +5,13 @@
! MODULE: advec_abstract_proc
!
!
! DESCRIPTION:
!> The module ``advec_abstract_proc'' gathers all the abstract procedure profiles that are used by the different advection
!! modules. It allows sharing these function/procedure profiles and safely using procedural arguments or pointers.
!!
!! This module is not supposed to be used by the main code but only by the other advection modules.
!! More precisely, a final user must only use the generic "advec" module which contains all the interfaces
!! to initialize the solver (e.g. choosing the remeshing formula and the dimension splitting) and to solve
!!
!
......@@ -33,48 +33,98 @@ module advec_abstract_proc
abstract interface
subroutine AC_remesh(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)
use cart_topology
use precision_tools
implicit none
! Input/Output
integer, intent(in) :: direction
integer, dimension(2), intent(in) :: ind_group
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
real(WP), dimension(:,:,:), intent(in) :: p_pos_adim ! adimensionned particles position
real(WP), dimension(:,:,:), intent(in) :: p_V ! particles velocity
real(WP), dimension(:,:,:), intent(inout) :: scal
real(WP), intent(in) :: dt
end subroutine AC_remesh
end interface
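This abstract interface is what allows the remeshing formula to be chosen at run time: any routine with a conforming signature can be bound to a pointer declared with procedure(AC_remesh). A minimal sketch of the pattern, mirroring what module advec does in advec_init:

   procedure(AC_remesh), pointer :: remesh => null()
   ! ... at initialisation ...
   remesh => AC_remesh_lambda_group   ! any routine conforming to the AC_remesh interface
   ! ... during a time step ...
   call remesh(direction, ind_group, gs, p_pos_adim, p_V, j, k, scal, dt)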
! --- Abstract profile of subroutine used to remesh scalar inside a buffer ---
! --- Abstract profile of subroutine used to compute limitator function ---
! Note that such a function actually computes limitator/8, as it is always
! this fraction which appears in the remeshing polynomials (and thus directly
! dividing the limitator function by 8 avoids having to do it several times later)
abstract interface
!! @param[in] gp_s = size of a group (ie number of line it gathers along the two other directions)
!! @param[in] ind_group = coordinate of the current group of lines
!! @param[in] p_pos = particles position
!! @param[in] scalar = scalar advected by particles
!! @param[out] limit = limitator function
subroutine advec_limitator_group(gp_s, ind_group, j, k, p_pos, &
& scalar, limit)
use precision_tools
implicit none
integer, dimension(2),intent(in) :: gp_s ! group size
integer, dimension(2), intent(in) :: ind_group ! group index
integer , intent(in) :: j,k ! block coordinates
real(WP), dimension(:,:,:), intent(in) :: p_pos ! particle position
real(WP), dimension(:,:,:), intent(in) :: scalar ! scalar field to advect
real(WP), dimension(:,:,:), intent(out) :: limit ! limitator function
end subroutine advec_limitator_group
end interface
! --- Abstract profile of subroutine used to remesh scalar inside a buffer - lambda formula ---
abstract interface
subroutine remesh_in_buffer_type(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
& scalar, buffer, pos_in_buffer)
use precision_tools
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_min
real(WP), dimension(:,:,:), intent(in) :: p_pos_adim ! adimensionned particles position
logical, dimension(:,:,:), intent(in) :: bl_type ! is the particle block a center block or a left one ?
logical, dimension(:,:,:), intent(in) :: bl_tag ! indice of tagged particles
integer, dimension(:,:), intent(in) :: send_min ! distance between me and processus wich send me information
integer, dimension(:,:), intent(in) :: send_max ! distance between me and processus wich send me information
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the initial scalar field transported by particles
real(WP),dimension(:), intent(out), target :: buffer ! buffer where particles are remeshed
integer, dimension(:), intent(inout) :: pos_in_buffer ! describe how the one dimensionnal array "buffer" are split
! in part corresponding to different processes
end subroutine remesh_in_buffer_type
end interface
! --- Abstract profile of subroutine used to remesh scalar inside a buffer - limited lambda formula ---
abstract interface
subroutine remesh_in_buffer(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, send_min, send_max, &
& scalar, remesh_line, buffer, pos_in_buffer)
use precision
use advec_remeshing_formula
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_min
real(WP), dimension(:,:,:), intent(in) :: p_pos_adim ! adimensionned particles position
logical, dimension(:,:,:), intent(in) :: bl_type ! is the particle block a center block or a left one ?
logical, dimension(:,:,:), intent(in) :: bl_tag ! indice of tagged particles
integer, dimension(:,:), intent(in) :: send_min ! distance between me and processus wich send me information
integer, dimension(:,:), intent(in) :: send_max ! distance between me and processus wich send me information
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the initial scalar field transported by particles
procedure(AC_remesh_line_pter), pointer, intent(in) :: remesh_line ! subroutine wich remesh a line of particle with
! the right remeshing formula
real(WP),dimension(:), intent(out), target :: buffer ! buffer where particles are remeshed
integer, dimension(:), intent(inout) :: pos_in_buffer! describe how the one dimensionnal array "buffer" are split
! in part corresponding to different processes
end subroutine remesh_in_buffer
subroutine remesh_in_buffer_limit(gs, j, k, ind_min, p_pos_adim, bl_type, bl_tag, limit,&
& send_min, send_max, scalar, buffer, pos_in_buffer)
use precision_tools
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_min
real(WP), dimension(:,:,:), intent(in) :: p_pos_adim ! adimensionned particles position
logical, dimension(:,:,:), intent(in) :: bl_type ! is the particle block a center block or a left one ?
logical, dimension(:,:,:), intent(in) :: bl_tag ! indice of tagged particles
real(WP), dimension(:,:,:), intent(in) :: limit ! limitator function (divided by 8)
integer, dimension(:,:), intent(in) :: send_min ! distance between me and processus wich send me information
integer, dimension(:,:), intent(in) :: send_max ! distance between me and processus wich send me information
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the initial scalar field transported by particles
real(WP),dimension(:), intent(out), target :: buffer ! buffer where particles are remeshed
integer, dimension(:), intent(inout) :: pos_in_buffer! describe how the one dimensionnal array "buffer" are split
! in part corresponding to different processes
end subroutine remesh_in_buffer_limit
end interface
! --- Abstract profile of subroutine used to remesh scalar inside a buffer - variant with no type/tag ---
......@@ -82,21 +132,21 @@ module advec_abstract_proc
subroutine remesh_in_buffer_notype(gs, j, k, ind_min, p_pos_adim, send_min, send_max, &
& scalar, buffer, pos_in_buffer)
use precision
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_min
real(WP), dimension(:,:,:), intent(in) :: p_pos_adim ! adimensionned particles position
integer, dimension(:,:), intent(in) :: send_min ! distance between me and processus wich send me information
integer, dimension(:,:), intent(in) :: send_max ! distance between me and processus wich send me information
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the initial scalar field transported by particles
! the right remeshing formula
real(WP),dimension(:), intent(out), target :: buffer ! buffer where particles are remeshed
integer, dimension(:), intent(inout) :: pos_in_buffer! describe how the one dimensionnal array "buffer" are split
! in part corresponding to different processes
use precision_tools
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_min
real(WP), dimension(:,:,:), intent(in) :: p_pos_adim ! adimensionned particles position
integer, dimension(:,:), intent(in) :: send_min ! distance between me and processus wich send me information
integer, dimension(:,:), intent(in) :: send_max ! distance between me and processus wich send me information
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the initial scalar field transported by particles
! the right remeshing formula
real(WP),dimension(:), intent(out), target :: buffer ! buffer where particles are remeshed
integer, dimension(:), intent(inout) :: pos_in_buffer! describe how the one dimensionnal array "buffer" are split
! in part corresponding to different processes
end subroutine remesh_in_buffer_notype
end interface
......@@ -106,21 +156,21 @@ module advec_abstract_proc
abstract interface
subroutine remesh_buffer_to_scalar(gs, j, k, ind_proc, gap, begin_i1, cartography, buffer, scalar, beg_buffer)
use precision
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_proc ! to read the good cartography associate to the processus which send me the buffer.
integer,intent(in) :: gap ! gap between my local indices and the local indices from another processes
integer, intent(in) :: begin_i1 ! indice corresponding to the first place into the cartography
! array where indice along the the direction of the group of lines are stored.
integer, dimension(:,:), intent(in) :: cartography
real(WP),dimension(:), intent(in) :: buffer ! buffer containing the data to redistribute into the local scalar field.
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the scalar field.
integer, intent(out) :: beg_buffer ! first indice inside where the scalar values are stored into the buffer
! for the current sender processus. To know where reading data into the buffer.
use precision_tools
implicit none
! Input/Output
integer, dimension(2), intent(in) :: gs
integer, intent(in) :: j, k
integer, intent(in) :: ind_proc ! to read the good cartography associate to the processus which send me the buffer.
integer,intent(in) :: gap ! gap between my local indices and the local indices from another processes
integer, intent(in) :: begin_i1 ! indice corresponding to the first place into the cartography
! array where indice along the the direction of the group of lines are stored.
integer, dimension(:,:), intent(in) :: cartography
real(WP),dimension(:), intent(in) :: buffer ! buffer containing the data to redistribute into the local scalar field.
real(WP), dimension(:,:,:), intent(inout) :: scalar ! the scalar field.
integer, intent(out) :: beg_buffer ! first indice inside where the scalar values are stored into the buffer
! for the current sender processus. To know where reading data into the buffer.
end subroutine remesh_buffer_to_scalar
end interface
......
!> @addtogroup toolbox
!! @{
!------------------------------------------------------------------------------
!
! MODULE: precision
......@@ -10,7 +12,7 @@
!> representation in the code. It is set to double precision for REAL.
!------------------------------------------------------------------------------
MODULE precision
MODULE precision_tools
IMPLICIT NONE
INTEGER, PARAMETER :: SP = kind(1.0)
......@@ -20,9 +22,15 @@ MODULE precision
REAL(WP), PARAMETER :: MAX_REAL_WP = HUGE(sample_real_at_WP)
INTEGER, PRIVATE :: sample_int
INTEGER, PARAMETER :: MAX_INTEGER = HUGE(sample_int)
INTEGER, PARAMETER :: DI = selected_int_kind(r=12)
!> the MPI type for REAL exchanges in simple or double precision
INTEGER, PUBLIC :: MPI_REAL_WP
!> the MPI type for COMPLEX exchanges in simple or double precision
INTEGER, PUBLIC :: MPI_COMPLEX_WP
!> the string size
INTEGER, PARAMETER :: str_short = 8
INTEGER, PARAMETER :: str_medium = 64
INTEGER, PARAMETER :: str_long = 4096
END MODULE precision
END MODULE precision_tools
!> @}
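Typical declarations built on this module look like the following (illustrative sketch):

   use precision_tools
   real(WP)                  :: dt         ! working-precision real (double precision by default)
   character(len=str_medium) :: file_name  ! 64-character string
   integer(DI)               :: n_total    ! integer kind with at least 12 decimal digits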