diff --git a/HySoP/CMakeLists.txt b/HySoP/CMakeLists.txt
index 3d665d59ddd1d4609b3252e2e4217debe40ac33e..ff46df78221fc4c2b0471a4a92581c1ea0f3e02a 100644
--- a/HySoP/CMakeLists.txt
+++ b/HySoP/CMakeLists.txt
@@ -50,7 +50,7 @@ set(EXE_NAME ${PROJECT_NAME}Run)
 set(${PROJECT_LIBRARY_NAME}_SRCDIRS
   src
   src/interfaces/Fortran2Cpp
-  src/interfaces/ppm
+#  src/interfaces/ppm
 )
 # A main file to create an executable (test purpose)
 # Any files in these dirs will be used to create the Parmes exec (linked with libparmes)
diff --git a/HySoP/ParmesConfig.cmake.in b/HySoP/ParmesConfig.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..817a3f7c4a77b8ad7a3736c057d7301981d7a828
--- /dev/null
+++ b/HySoP/ParmesConfig.cmake.in
@@ -0,0 +1,38 @@
+# - Config file for the @PACKAGE_NAME@ package
+# Written by F. Pérignon, March 2011
+#
+# This file generates @PACKAGE_NAME@Config.cmake, which may be used by another CMake project
+# to retrieve all the configuration variables of @PACKAGE_NAME@.
+#
+# It defines the following variables:
+#
+# @PACKAGE_NAME@_INCLUDE_DIRS - include directories for @PACKAGE_NAME@
+# @PACKAGE_NAME@_EXTRA_INCLUDE_DIRS - path to extra headers needed for @PACKAGE_NAME@ (metis.h ...)
+# @PACKAGE_NAME@_LIBRARY_DIRS - path to @PACKAGE_NAME@ library(ies)
+# @PACKAGE_NAME@_LIBRARIES - libraries to link against to use @PACKAGE_NAME@
+# @PACKAGE_NAME@_USE_XXX - value of option "USE_XXX" (for example USE_MPI, USE_Metis ... = ON or OFF)
+
+# Tell the user project where to find our headers and libraries
+set(@PACKAGE_NAME@_INCLUDE_DIRS "${${PACKAGE_NAME}_INCLUDE_DIRS}")
+set(@PACKAGE_NAME@_EXTRA_INCLUDE_DIRS "${${PACKAGE_NAME}_EXTRA_INCLUDE_DIRS}")
+set(@PACKAGE_NAME@_LIBRARY_DIRS "${${PACKAGE_NAME}_LIB_DIR}")
+set(@PACKAGE_NAME@_MODULE_DIR "${${PACKAGE_NAME}_INCLUDE_DIRS}/Modules")
+
+# Our library dependencies (contains definitions for IMPORTED targets)
+include("${${PACKAGE_NAME}_CMAKE_DIR}/@PACKAGE_NAME@LibraryDepends.cmake")
+
+# These are IMPORTED targets created by @PACKAGE_NAME@LibraryDepends.cmake
+set(@PACKAGE_NAME@_LIBRARIES @PROJECT_LIBRARY_NAME@)
+
+# Set all @PACKAGE_NAME@ options
+set(@PACKAGE_NAME@_USE_MPI @USE_MPI@)
+set(@PACKAGE_NAME@_USE_PPM @USE_PPM@)
+set(@PACKAGE_NAME@_USE_PPM_Numerics @USE_PPM_Numerics@)
+
+
+# Set var for compilers used by @PACKAGE_NAME@
+#set(@PACKAGE_NAME@_Fortran_COMPILER @CMAKE_Fortran_COMPILER@)
+
+# Fortran flags
+#set(@PACKAGE_NAME@_Fortran_FLAGS @CMAKE_Fortran_FLAGS@)
diff --git a/HySoP/ParmesConfigVersion.cmake.in b/HySoP/ParmesConfigVersion.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..16fc03f5985e060fea9649b6acd3f2bba5f940bb
--- /dev/null
+++ b/HySoP/ParmesConfigVersion.cmake.in
@@ -0,0 +1,12 @@
+set(PACKAGE_VERSION "@Parmes_version@")
+
+
+# Check whether the requested PACKAGE_FIND_VERSION is compatible
+if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
+  set(PACKAGE_VERSION_COMPATIBLE FALSE)
+else()
+  set(PACKAGE_VERSION_COMPATIBLE TRUE)
+  if("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
+    set(PACKAGE_VERSION_EXACT TRUE)
+  endif()
+endif()
diff --git a/HySoP/config.hpp.cmake b/HySoP/config.hpp.cmake
index 9d6a48fbb53f0bfb0205a1f5eb51fcc9207b13d3..3b5277f6e40af1518518711adfacd48bbbfa33b2 100644
--- a/HySoP/config.hpp.cmake
+++ b/HySoP/config.hpp.cmake
@@ -2,7 +2,7 @@
 #define PARMESCONFIG_HPP

 #define WITH_CMAKE
-#cmakedefine WITH_MPI
+#cmakedefine USE_MPI
 #cmakedefine DOUBLEPREC

 #endif
diff --git a/HySoP/src/main/Plouhmans.f90 b/HySoP/src/Unstable/Plouhmans.f90
similarity index 100%
rename from HySoP/src/main/Plouhmans.f90
rename to HySoP/src/Unstable/Plouhmans.f90
diff --git a/HySoP/src/callPPM.f90 b/HySoP/src/Unstable/callPPM.f90
similarity index 100%
rename from HySoP/src/callPPM.f90
rename to HySoP/src/Unstable/callPPM.f90
diff --git a/HySoP/src/main/Domain.f90 b/HySoP/src/main/Domain.f90
index 46709a6bc07d73e7703f3cf978d3d2dd08a56cb6..7f6d864c3a61c379e658dee2471d0a451e315760 100755
--- a/HySoP/src/main/Domain.f90
+++ b/HySoP/src/main/Domain.f90
@@ -7,38 +7,59 @@ module Domain
   implicit none

   real(mk), dimension(:), pointer :: domain_minCoords => NULL(), domain_maxCoords=>NULL()
+
+  real(mk), dimension(:), pointer :: lengths => NULL()
+
   integer,  dimension(:), pointer :: domain_bc=>NULL()
   integer,  dimension(:), pointer :: domain_ghostsize=>NULL()
   integer,  dimension(:), pointer :: grid_resolution =>NULL()
   real(mk), dimension(:), pointer :: grid_step =>NULL()
-
+  real(mk), dimension(:), pointer :: boundary_layer_min => NULL(), boundary_layer_max => NULL()
+
   integer, private :: istat

contains

  subroutine init_geometry()

+    real(mk) :: zFolke
+
+    zFolke = 1./0.4844
+
    ! Domain size and min/max coords
-    allocate(domain_minCoords(dime), domain_maxCoords(dime), stat = istat)
+    allocate(domain_minCoords(dime), domain_maxCoords(dime), lengths(dime), stat = istat)
    if(istat.ne.0) stop 'Geometry, allocation failed'
-    domain_minCoords = -1.1
-    domain_maxCoords = 1.1
+    allocate(boundary_layer_min(dime), boundary_layer_max(dime), stat = istat)
+    if(istat.ne.0) stop 'Geometry, allocation failed'
+
+    domain_minCoords(1) = 0.0
+    domain_maxCoords(1) = 2.*zFolke
+    domain_minCoords(2) = -zFolke
+    domain_maxCoords(2) = zFolke
+    ! 0.3: boundary layer
+    domain_minCoords(3) = -zFolke - 0.3
+    domain_maxCoords(3) = zFolke + 0.3
+
    ! Boundary conditions and ghosts
    allocate(domain_bc(2*dime), domain_ghostsize(dime), stat = istat)
    if(istat.ne.0) stop 'BC, allocation failed'
    domain_bc = ppm_param_bcdef_periodic
    domain_ghostsize = 2
-
+    boundary_layer_min = 0
+    boundary_layer_max = 0
+    lengths = domain_maxCoords - domain_minCoords
  end subroutine init_geometry

  subroutine init_grid()

    allocate(grid_resolution(dime), grid_step(dime), stat=istat)
    if(istat.ne.0) stop 'grid_resolution, allocation failed'
-    grid_resolution = 15
+    grid_resolution = 65
    grid_step = (domain_maxCoords - domain_minCoords)/(real(grid_resolution,mk)-1.)
-
+    boundary_layer_min = 0
+    boundary_layer_max = 0
  end subroutine init_grid

end module Domain
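[Editor's note] On the numbers above: grid_resolution counts mesh nodes, so the step computed in init_grid spans grid_resolution-1 cells. The standalone sketch below is illustrative only (the program name and local variables are not part of the sources) and reproduces that arithmetic for the channel geometry just defined:

program check_grid_step
  implicit none
  integer, parameter :: mk = kind(1.0d0)
  real(mk) :: zFolke, length, step
  integer  :: resolution
  zFolke = 1./0.4844            ! half-width used in init_geometry
  length = 2.*zFolke            ! domain extent in direction 1
  resolution = 65               ! number of nodes, as set in init_grid
  step = length/(real(resolution,mk)-1.)   ! same formula as init_grid: 64 cells
  print *, 'grid step =', step
end program check_grid_step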
diff --git a/HySoP/src/main/Fields.f90 b/HySoP/src/main/Fields.f90
index 0d7cc8fe7184f935515d9c7de797ad3016203eb1..6040ede39860e6b6f052a87e5f389ddca36e25b7 100755
--- a/HySoP/src/main/Fields.f90
+++ b/HySoP/src/main/Fields.f90
@@ -9,10 +9,12 @@ module Fields
   real(mk), dimension(:,:,:,:,:), pointer :: velocity => NULL()
   !> Vorticity
   real(mk), dimension(:,:,:,:,:), pointer :: vorticity =>NULL()
-  !> Stream function
-  !real(mk), dimension(:,:,:,:,:), pointer :: stream_function =>NULL()
+  !> Stream function - test purpose only; useless if the ppm fft solver works as it is supposed to ...
+  real(mk), dimension(:,:,:,:,:), pointer :: stream_function =>NULL()
   !> rhs of vorticity eq (i.e. stretch + diffusion terms)
   real(mk), dimension(:,:,:,:,:), pointer :: rhs =>NULL()
+  !> Analytical velocity (test purpose)
+  real(mk), dimension(:,:,:,:,:), pointer :: vel_ex => NULL()

contains

@@ -39,6 +41,7 @@ contains
    isublist => topo%isublist
    nsublist = topo%nsublist

+
    vndata(1) = maxval(ndata(1,isublist(1:nsublist)))
    vndata(2) = maxval(ndata(2,isublist(1:nsublist)))
    vndata(3) = maxval(ndata(3,isublist(1:nsublist)))
@@ -53,8 +56,12 @@ contains
    if(istat.ne.0) stop 'Field allocation error for vorticity'
    ! rhs
    allocate(rhs(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for rhs'
+    ! Stream function (test purpose)
+    allocate(stream_function(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
    if(istat.ne.0) stop 'Field allocation error for stream_function'
+    ! Analytical velocity (test purpose)
+    allocate(vel_ex(dime,ldl(1):ldu(1),ldl(2):ldu(2),ldl(3):ldu(3),nsublist), stat = istat)
+    if(istat.ne.0) stop 'Field allocation error for vel_ex'
+
    nullify(isublist, ndata)

  end subroutine init_fields
diff --git a/HySoP/src/main/FieldsComputation.f90 b/HySoP/src/main/FieldsComputation.f90
index 176bb02daa9ab5dd883cfebb31baba8f70d2dae1..70ce7f45c7de17eb6022d88b0b1585887c8d3066 100755
--- a/HySoP/src/main/FieldsComputation.f90
+++ b/HySoP/src/main/FieldsComputation.f90
@@ -1,14 +1,15 @@
 module FieldsComputation

   use client_data, only: mk
-
+  use client_topology, only: ppm_t_topo
+  use mpi
   !use Solver, only: solve_poisson

   implicit none

   private

-  public :: fit_velocity
+  public :: fit_velocity, computeFlowRate

contains

@@ -28,16 +29,62 @@ contains
!!$
!!$  end subroutine compute_velocity

-  subroutine fit_velocity(vel, resolution, bc)
+  subroutine fit_velocity(vel, stepSize, lengths, topo, boundary_layer_min, boundary_layer_max)
    ! Velocity, intent(inout)
    real(mk), dimension(:,:,:,:,:), pointer :: vel
-    ! grid resolution, intent(in)
-    integer, dimension(:), pointer :: resolution
-    ! boundary conditions type, intent(in)
-    integer, dimension(:), pointer :: bc
+    ! grid step, intent(in)
+    real(mk), dimension(:), pointer :: stepSize
+    ! dims of the domain, intent(in)
+    real(mk), dimension(:), pointer :: lengths
+    real(mk), dimension(:), pointer :: boundary_layer_min, boundary_layer_max
+
+    type(ppm_t_topo), pointer :: topo
+
+    real(mk) :: theoretical_flowrate, computed_flowrate, surf, constant
+
+    ! The flow is supposed to be along direction 1
+    ! physical domain surface
+    surf = (boundary_layer_max(3)-boundary_layer_min(3))*lengths(2)
+    ! Value of the flow rate we want to fix
+    theoretical_flowrate = 1.
+    call computeFlowRate(vel, stepSize, topo, computed_flowrate)
+    ! Correction added to the velocity ...
+    constant = (boundary_layer_max(3)-boundary_layer_min(3))/lengths(3) !(theoretical_flowrate - computed_flowrate)/surf
+    print *, 'const', constant
+
+    print *, computed_flowrate, '//', surf

-    ! TODO
+    vel(1,:,:,:,1) = vel(1,:,:,:,1) + constant

  end subroutine fit_velocity

+  subroutine computeFlowRate(vel, stepSize, topo, flowRate)
+
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! grid step, intent(in)
+    real(mk), dimension(:), pointer :: stepSize
+    type(ppm_t_topo), pointer :: topo
+    real(mk), intent(out) :: flowRate
+
+    integer, dimension(:,:), pointer :: nnodes => NULL()
+    integer :: isub, isubl, info
+    real(mk) :: subRate
+
+    nnodes => topo%mesh(1)%nnodes
+
+    ! 2-D trapezoidal rule on the inlet face, accumulated over the local subdomains
+    flowRate = 0.
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       subRate = vel(1,1,1,1,isub) + vel(1,1,1,nnodes(3,isubl),isub) +&
+            vel(1,1,nnodes(2,isubl),1,isub) + vel(1,1,nnodes(2,isubl),nnodes(3,isubl),isub)
+       subRate = 0.25*subRate + 0.5*(sum(vel(1,1,2:nnodes(2,isubl)-1,1,isub)) + &
+            sum(vel(1,1,2:nnodes(2,isubl)-1,nnodes(3,isubl),isub)) &
+            + sum(vel(1,1,1,2:nnodes(3,isubl)-1,isub)) + sum(vel(1,1,nnodes(2,isubl),2:nnodes(3,isubl)-1,isub))) &
+            + sum(vel(1,1,2:nnodes(2,isubl)-1,2:nnodes(3,isubl)-1,isub))
+       flowRate = flowRate + stepSize(2)*stepSize(3)*subRate
+    enddo
+
+    ! In-place reduction: MPI forbids aliasing the send and receive buffers
+    call MPI_AllReduce(MPI_IN_PLACE, flowRate, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, info)
+
+  end subroutine computeFlowRate
+
end module FieldsComputation
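[Editor's note] computeFlowRate above is a 2-D trapezoidal rule over one y-z face: corner nodes weighted 1/4, edge nodes 1/2, interior nodes 1, all scaled by the cell area. The standalone function below restates that quadrature for a generic face array (trapz2d is illustrative and not part of the sources):

function trapz2d(f, n2, n3, h2, h3) result(q)
  implicit none
  integer, intent(in) :: n2, n3
  real(kind(1.d0)), intent(in) :: f(n2,n3), h2, h3
  real(kind(1.d0)) :: q
  ! corners: weight 1/4 ; edges: weight 1/2 ; interior: weight 1
  q = 0.25*(f(1,1) + f(n2,1) + f(1,n3) + f(n2,n3)) &
       + 0.5*(sum(f(2:n2-1,1)) + sum(f(2:n2-1,n3)) &
       +      sum(f(1,2:n3-1)) + sum(f(n2,2:n3-1))) &
       + sum(f(2:n2-1,2:n3-1))
  q = h2*h3*q
end function trapz2d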
diff --git a/HySoP/src/main/NavierStokes3D.f90 b/HySoP/src/main/NavierStokes3D.f90
index a0d5a8b9ff994b4cadeb676ff821541af0385877..dc9f723e7ea241c54c201982968a81892d45bda9 100755
--- a/HySoP/src/main/NavierStokes3D.f90
+++ b/HySoP/src/main/NavierStokes3D.f90
@@ -12,21 +12,26 @@ module ppmNavierStokes3D
   ! Physical domain and grid
   use Domain
   ! Fields on the grid
-  use Fields, only: init_fields, velocity, vorticity, rhs
+  use Fields, only: init_fields, velocity, vorticity, rhs, vel_ex, stream_function
   ! Topology
   use client_topology, only: init_topo, topo
   ! Multigrid solver
-  use Solver, only : init_poisson_solver, solve_poisson
+  use Solver, only : init_poisson_solver, solve_poisson, ppm_poisson_drv_none, ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4

-  use penalisation, only : penalisation_init, penalise_velocity
+  use penalisation, only : penalisation_init, penalise_velocity, get_drag

-  use FieldsComputation, only : fit_velocity
+  use FieldsComputation, only : fit_velocity, computeFlowRate

-  use vectorcalculus, only: curldf4, computeRHS
+  use vectorcalculus, only: curldf4, computeRHS, norm1, norm2, normInf

   use Particles, only : init_particles, update_particles, push_particles, remesh
+
+  use io_vtk

   use mpi
+
+  use testsFunctions
+
   !use WrapFort

   implicit none
@@ -56,7 +61,7 @@ contains
    ! Init ppm
    !======================
    prec = ppm_kind_double ! Defined in ppm_param.h
-    comm = MPI_COMM_WORLD  !
+    comm = MPI_COMM_WORLD
    debug = 0
    tol = -10
    info = -1
@@ -77,22 +82,25 @@ contains
    ! Fields allocation
    !======================
    call init_fields(domain_ghostsize, topo)
-
    !======================
    ! Init Physics
    !======================
+    ! set boundary layer(s) positions
+    boundary_layer_min(1:2) = domain_minCoords(1:2)
+    boundary_layer_max(1:2) = domain_maxCoords(1:2)
+    boundary_layer_min(3) = domain_minCoords(3) + 0.3
+    boundary_layer_max(3) = domain_maxCoords(3) - 0.3 !15.*grid_step(3)
+
    velocity = 0.0_mk
-    vorticity = 1.0_mk
+    call init_vorticity(vorticity, topo, grid_step, boundary_layer_min, boundary_layer_max)

-    vorticity(3,10,10,10,:) = 50.0_mk
-
-    call penalisation_init(topo, domain_ghostsize, grid_step)
+    call penalisation_init(topo, grid_step, boundary_layer_min, boundary_layer_max)
    !======================
    ! Init Particles
    !======================
-    call init_particles()
+    call init_particles(vorticity, topo%ID, topo%mesh(1)%ID, velocity)

    print *, "end of parmes:ppm:init_client"

@@ -101,93 +109,118 @@ contains
  subroutine main_client() bind(c,name='NavierStokes3D')

    real(mk) :: initial_time, final_time, current_time, time_step
-    integer :: i, rank, j,k, npart
+    integer :: rank, iter, derive, max_iter !,nbProcs
    logical :: resetpos
-    real(mk) :: nu
-
-    nu = 4.74833808D-03
+    real(mk) :: nu, Re
+    ! real(mk), dimension(:,:), pointer :: err1, err2, errinf
+    real(mk), dimension(:), pointer :: drag =>NULL()
+
+    Re = 133.4
+    nu = 1./Re
    initial_time = 0.0
-    final_time = 1.0
-    time_step = 0.005
+    final_time = 10.0
+    time_step = 0.05
    current_time = initial_time
-
-    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
-
+    iter = 1
+    max_iter = 10
+    ! rank is used in the prints below, so this call must stay active
+    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+!    call MPI_COMM_SIZE(MPI_COMM_WORLD,nbProcs,info)
    ! Initialisation stuff
    call init_client()
-
    !======================
    ! Init solver
    !======================
    ! Init multigrid works in ppm (or seems to ...) but solve fails.
    !call init_multigrid(topo%ID, topo%mesh(1)%ID, domain_ghostsize, domain_bc)
+
+    call MPI_BARRIER(MPI_COMM_WORLD,info)

-    call init_poisson_solver(vorticity,velocity,topo%ID, topo%mesh(1)%ID)
+    ! Analytical solutions
+    !call rhs_analytic(topo, grid_step, stream_function,vel_ex,vorticity,nu)

-    call MPI_BARRIER(MPI_COMM_WORLD,info)
-
-!!$    print *, shape(topo%mesh)
-!!$    do i=1, size(topo%mesh)
-!!$       print *, 'mesh :', topo%mesh(i)%ID
-!!$    end do
-!!$
-!!$    do i=1, 3!size(ppm_topo)
-!!$       print *, rank, ' topo: ', ppm_topo(i)%t%ID
-!!$
-!!$       print *, rank, ' minmax: ', ppm_topo(i)%t%min_subd, ' max ...', ppm_topo(i)%t%max_subd
-!!$       print *, rank ,' bc ', ppm_topo(i)%t%subs_bc
-!!$
-!!$    end do
-!!$
-!!$
+    ! Init ghosts ...
+    call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+    call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity, 3, info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
+
+    derive = ppm_poisson_drv_curl_fd4
+    call init_poisson_solver(vorticity,velocity,topo%ID, topo%mesh(1)%ID,derive)
+
+    ! allocate(err1(dime,topo%nsublist),err2(dime,topo%nsublist),errinf(dime,topo%nsublist))
+
+    print *, '[',rank,'] space/time steps ', grid_step, time_step
+    print *, '[',rank,'] mesh ', shape(topo%mesh(1)%nnodes), topo%mesh(1)%nnodes
+
+    allocate(drag(max_iter))
+
+    open(10,file='drag')
+    ! Time loop
+    do while(current_time < final_time .and. iter <= max_iter)
+       print *, '================ Start iter ',iter,' ================'

-    do while(current_time < final_time )
-
       ! Compute velocity from vorticity
       ! Two steps:
       !  - solve Poisson for stream_function and update velocity from stream_function : everything is supposed
       ! to be done in ppm routine; indeed we do not have to deal with stream_function.
       !  - update velocity to fit with required boundary values
       call solve_poisson(vorticity, velocity, topo%ID, topo%mesh(1)%ID,domain_ghostsize)
-
-       call fit_velocity(velocity, grid_resolution,domain_bc)
-
+       call MPI_BARRIER(MPI_COMM_WORLD,info)
+       !call fit_velocity(velocity, grid_step,lengths,topo,boundary_layer_min,boundary_layer_max)
       ! Penalize velocity
       call penalise_velocity(velocity,topo,time_step)
-
       ! PPM map : send new values to neighbours
       call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
       call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,velocity, 3, info)
       call ppm_map_field_send(info)
       call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,velocity,3,domain_ghostsize,info)
+
+       call get_drag(drag(iter))
+       !print *, 'drag...', drag(iter,rank)
+       write(10,'(e14.5,e14.5)') current_time, drag(iter)

-       ! Compute the new "penalized" vorticity
+!!$       ! Compute the new "penalized" vorticity
       call curlDF4(velocity, vorticity,topo, grid_step)
+
+       ! Computation of rhs of vorticity equation (on the grid) ...
+       call computeRHS(velocity, vorticity, rhs, topo, grid_step, nu)
+       call MPI_BARRIER(MPI_COMM_WORLD,info)
+
+       call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+       call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,rhs, 3, info)
+       call ppm_map_field_send(info)
+       call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,rhs,3,domain_ghostsize,info)

-       ! Initialize the particles according to the last computed vorticity
+       ! Initialize the particles according to the last computed vorticity, velocity and rhs
       resetpos = .TRUE.
-       vorticity = 0
-       vorticity(3,:,2,2,1) = 12.0
-       call computeRHS(velocity, vorticity, rhs, topo, grid_step, nu)
+
+!!$       call printToVTK("pre",0, current_time,velocity,vorticity,topo,grid_step)

       call update_particles(vorticity, resetpos, topo%ID, topo%mesh(1)%ID, domain_ghostsize, velocity, rhs)

       ! Integrate
-       call push_particles()
+       call push_particles(time_step,topo%ID, topo%mesh(1)%ID, domain_ghostsize, velocity,rhs)
+
       ! Remesh
-       call remesh()
-
+       call remesh(topo%ID, topo%mesh(1)%ID,domain_ghostsize,vorticity)
+
+!       call printToVTK("post",iter, current_time,velocity,vorticity,topo,grid_step)
+
       current_time = current_time + time_step
-       current_time = final_time
-    enddo
+       !current_time = final_time
+       iter = iter+1
+       print *, 'end of iteration ', iter-1
+       call MPI_BARRIER(MPI_COMM_WORLD,info)
+    enddo
+
+    close(10)

    call ppm_finalize(info)
    print *, 'end ppm simulation'
-
+    deallocate(drag)
  end subroutine main_client
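[Editor's note] The ghost_get/push/send/pop sequence appears three times in main_client (vorticity, velocity, rhs). A thin wrapper would keep each call site to one line; the sketch below is illustrative only -- the helper name is hypothetical and how the ppm_map_field_* routines are imported is an assumption, but the four calls are exactly the ones used above:

subroutine update_field_ghosts(field, topoid, meshid, ghostsize, info)
  ! Hypothetical helper: one full ghost-layer update for a 3-component grid field.
  ! Assumes the ppm_map_field_* routines are accessible here the same way they
  ! are in ppmNavierStokes3D.
  real(kind(1.d0)), dimension(:,:,:,:,:), pointer :: field
  integer, intent(in) :: topoid, meshid
  integer, dimension(:), pointer :: ghostsize
  integer, intent(out) :: info
  call ppm_map_field_ghost_get(topoid, meshid, ghostsize, info)
  call ppm_map_field_push(topoid, meshid, field, 3, info)
  call ppm_map_field_send(info)
  call ppm_map_field_pop(topoid, meshid, field, 3, ghostsize, info)
end subroutine update_field_ghosts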
diff --git a/HySoP/src/main/Particles.f90 b/HySoP/src/main/Particles.f90
index e972dd6e5266050746511724aa7379ed5a04a9d3..4e3199e75f26a7675543d2d33f4d29f81a82c805 100755
--- a/HySoP/src/main/Particles.f90
+++ b/HySoP/src/main/Particles.f90
@@ -1,12 +1,14 @@
 !> Creation, initialisation ... of particles
 module Particles

-  use ppm_module_rmsh, only : ppm_rmsh_create_part
+  use ppm_module_rmsh, only : ppm_rmsh_create_part, ppm_interp_m2p, ppm_interp_p2m !, ppm_rmsh_remesh
   use client_data, only: mk, dime
-  use mpi
+!  use mpi
   use ppm_module_data, only : ppm_param_rmsh_kernel_mp4
-  use ppm_module_interp_m2p, only: ppm_interp_m2p
+  use ppm_module_map_part
+  use ppm_module_util_dbg
+
   implicit none

   private
@@ -24,7 +26,7 @@ module Particles
   !> Particles main "var"
   real(mk), dimension(:,:), pointer :: omp=>NULL()
   !> Particle velocities
-  real(mk), dimension(:,:), pointer :: vp=>NULL()
+  real(mk), dimension(:,:), pointer :: velo_p=>NULL()
   !> Particles RHS term
   real(mk), dimension(:,:), pointer :: rhsp => NULL()
   !> Backup vector for RK schemes
@@ -38,19 +40,53 @@ contains

-  !> Set required parameters for particles creation
-  subroutine init_particles()
-
+  !> Set required parameters for particles creation and initialize a first particles distribution
+  subroutine init_particles(field_on_grid, topoid, meshid, vel)
+    ! The field used to create particles
+    real(mk), dimension(:,:,:,:,:), pointer :: field_on_grid
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info, newNpart
+    ! velocity on grid
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+
+    ! Cutoff: lower and upper bounds; a particle is created at each grid point where
+    ! the reference field value lies between cutoff(1) and cutoff(2).
    cutoff(1) = 1.D-6
    cutoff(2) = 100000
-    allocate(cutoff_weights(dime))
-    cutoff_weights = 1.0
+    !allocate(cutoff_weights(dime))
+    !cutoff_weights = 1.0
    kernel = ppm_param_rmsh_kernel_mp4
    npart = 0
    last_npart = npart
    first_call = .TRUE.

+    ! --------> The following call will allocate memory for xp, omp and velo_p, right?
+    ! --------> And deallocation will be handled by ppm also?
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,.TRUE.,&
+         field_wp=vel,wp=velo_p,lda2=dime)
+
+    !> --------> Other variables carried by particles but not allocated during the ppm_rmsh_create call
+    ! --------> is there a better way to do this, with map_push or other ppm routines?
+    allocate(rhsp(dime,npart),xp0(dime,npart))
+
+    ! --------> Is the following (map_part_global) required? I would say yes but I'm not sure ...
+    newNpart = 0
+    call ppm_map_part_global(topoid,xp,npart,info)    ! positions
+    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
+    call ppm_map_part_push(rhsp,dime,npart,info)      ! rhs
+    call ppm_map_part_push(omp,dime,npart,info)       ! vorticity
+    call ppm_map_part_push(xp0,dime,npart,info)
+    call ppm_map_part_send(npart,newNpart,info)       ! send
+    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+    npart = newNpart
+
  end subroutine init_particles

  !> Create particles distribution
@@ -62,7 +98,7 @@ contains
    ! topo and mesh ids
    integer, intent(in) :: topoid
    integer, intent(in) :: meshid
-    integer :: info, rank
+    integer :: info, newNpart
    integer, dimension(:), pointer :: ghostsize
    ! velocity on grid
    real(mk), dimension(:,:,:,:,:), pointer :: vel
@@ -71,93 +107,140 @@ contains

    info = 0
-    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+    !call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)

    ! Call ppm func to create the particles distribution
-    print *, "???? ", rank, " ", associated(xp), npart
-    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,cutoff_weights)
-    print *, "POST : ", rank, " ", associated(xp)
-    if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
-
+    last_npart = npart
+    call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
+         field_wp=vel,wp=velo_p,lda2=dime)
+    !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart

-    !> Other variables carried by particles
-    if(first_call) then
-       allocate(vp(dime,npart), rhsp(dime,npart), xp0(dime,npart))
-    else if (last_npart.ne.npart) then
-       deallocate(vp, rhs, xp0)
-       vp => NULL()
-       xp0 => NULL()
-       rhs => NULL()
-       allocate(vp(dime,npart), rhsp(dime,npart), xp0(dime,npart))
-    else
-       print *, "number of particles did not change ..."
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info)   ! positions
+    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
+    call ppm_map_part_send(npart,newNpart,info)       ! send
+    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+    npart = newNpart
+
+    ! --------> rhsp and xp0 size was computed before rmsh_create and part_pop => must be updated
+    ! --------> deallocate/allocate may be done by some ppm routine?
+    if (last_npart.lt.npart) then
+       deallocate(rhsp,xp0)  ! Todo : use ppm_alloc stuff
+       allocate(rhsp(dime,npart),xp0(dime,npart))
    end if
-    last_npart = npart
+    ! --------> Ok, from here all vars carried by particles must have the following shape : dime,npart

-    !> mesh to particles for velocity
-    call ppm_interp_m2p(topoid, meshid, xp, npart, vp, dime, kernel, ghostsize, vel,info)
-    !> mesh to particles for rhs of vorticity eq.
+    !> --------> mesh to particles for velocity => done in ppm_rmsh_create_part ???
+    ! call ppm_interp_m2p(topoid, meshid, xp, npart, velo_p, dime, kernel, ghostsize, vel,info)
+    !> --------> mesh to particles for rhs of vorticity eq. This routine does not resize rhsp; it must already have the right size before the call?
    call ppm_interp_m2p(topoid, meshid, xp, npart, rhsp, dime, kernel, ghostsize, rhs,info)
-
-    print *, rhsp
-
+    print *, 'End of update particles'
+
  end subroutine update_particles

-  !> time integration
-  subroutine push_particles()
+  !> time integration
+  !> Advection of particles, copy of Adrien's code.
+  subroutine push_particles(dt, topoid, meshid, ghostsize, vel, rhs)
+    !> time step
+    real(mk), intent(in) :: dt
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer :: info !, rank
+    integer, dimension(:), pointer :: ghostsize
+    ! velocity on grid
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! Secondary field to be mapped to particles
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs
+
+    !> local loop indices
+    integer :: i, newNpart
+    ! call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+    ! First stage: save positions and take a half step with the current velocity
+    do i=1,npart
+
+       xp0(1,i) = xp(1,i)
+       xp0(2,i) = xp(2,i)
+       xp0(3,i) = xp(3,i)
+
+       xp(1,i)=xp0(1,i)+0.5*dt*velo_p(1,i)
+       xp(2,i)=xp0(2,i)+0.5*dt*velo_p(2,i)
+       xp(3,i)=xp0(3,i)+0.5*dt*velo_p(3,i)
+
+    end do
+    newNpart = 0
+
+    call ppm_map_part_partial(topoid,xp,npart,info)   ! positions
+    call ppm_map_part_push(velo_p,dime,npart,info)    ! velocity
+    call ppm_map_part_push(rhsp,dime,npart,info)      ! rhs
+    call ppm_map_part_push(omp,dime,npart,info)       ! vorticity
+    call ppm_map_part_push(xp0,dime,npart,info)
+    call ppm_map_part_send(npart,newNpart,info)       ! send
+    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+
+    npart = newNpart
+    ! --------> No need to call any realloc or resize process for xp0, rhsp ... that was done during map_part_pop, right?
+
+    !! Mesh to particles at the new (midpoint) particle positions ...
+    !> for velocity
+    call ppm_interp_m2p(topoid, meshid, xp, npart, velo_p, dime, kernel, ghostsize, vel,info)
+    !> for rhs of vorticity eq.
+    call ppm_interp_m2p(topoid, meshid, xp, npart, rhsp, dime, kernel, ghostsize, rhs,info)
+
+    ! Second stage: full step from the saved positions with the midpoint values
+    do i=1,npart
+       xp(1,i)=xp0(1,i)+dt*velo_p(1,i)
+       xp(2,i)=xp0(2,i)+dt*velo_p(2,i)
+       xp(3,i)=xp0(3,i)+dt*velo_p(3,i)
+       omp(1,i)=omp(1,i)+dt*rhsp(1,i)
+       omp(2,i)=omp(2,i)+dt*rhsp(2,i)
+       omp(3,i)=omp(3,i)+dt*rhsp(3,i)
+    end do
+
+    newNpart = 0
+    call ppm_map_part_partial(topoid,xp,npart,info)   ! positions
+    call ppm_map_part_push(omp,dime,npart,info)       ! strengths
+    call ppm_map_part_send(npart,newNpart,info)       ! send
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)   ! strengths
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info)    ! positions
+    npart = newNpart

  end subroutine push_particles
-
-  subroutine remesh()
+
+  subroutine remesh(topoid, meshid, ghostsize, vorticity)
+    ! topo and mesh ids
+    integer, intent(in) :: topoid
+    integer, intent(in) :: meshid
+    integer, dimension(:), pointer :: ghostsize
+    ! vorticity on grid
+    real(mk), dimension(:,:,:,:,:), pointer :: vorticity

-    !call ppm_rmsh_remesh(topo%ID,topo%mesh(1)%ID,xp,npart,up,dime,kernel, domain_ghostsize,info,vorticity)
+    integer :: info
+
+    info = -1
+    !call ppm_rmsh_remesh(topoid,meshid,xp,npart,omp,dime,kernel, ghostsize,info,vorticity)
+    !ghostlayer = 0.0
+    !call ppm_dbg_print_d(topoid,ghostlayer,1,1,info,xp,npart)
+    call ppm_interp_p2m(topoid, meshid, xp, npart, omp, dime, kernel, ghostsize, vorticity, info)
+
+    if(info.ne.0) then
+       stop 'Particles: ppm remesh error '
+    end if

  end subroutine remesh

  subroutine free_particles()
-    if( associated(cutoff_weights)) deallocate(cutoff_weights)
-  end subroutine free_particles
-
-
-  subroutine advect(dt)
+    ! --------> is it required to call some specific routines to clean anything related to particles, at the end of the simulation?
+    ! or does ppm_finalize do it all?
+    if(associated(rhsp)) deallocate(rhsp)
+    if(associated(xp0)) deallocate(xp0)

-    !> time step
-    real(mk), intent(in) ::dt
-
-    !> local loop indices
-    integer :: i
-
-    do i=1,npart
-
-       xp0(1,i) = xp(1,i)
-       xp0(2,i) = xp(2,i)
-       xp0(3,i) = xp(3,i)
-
-       xp(1,i)=xp0(1,i)+0.5*dt*vp(1,i)
-       xp(2,i)=xp0(2,i)+0.5*dt*vp(2,i)
-       xp(3,i)=xp0(3,i)+0.5*dt*vp(3,i)
-
-    end do
-
-!!$    call interpo_l3_3d(vxg,xp,yp,zp,vx)
-!!$    call interpo_l3_3d(vyg,xp,yp,zp,vy)
-!!$    call interpo_l3_3d(vzg,xp,yp,zp,vz)
-!!$
-!!$    call interpo_l3_3d(stxg,xp,yp,zp,stx)
-!!$    call interpo_l3_3d(styg,xp,yp,zp,sty)
-!!$    call interpo_l3_3d(stzg,xp,yp,zp,stz)
-!!$
-    do i=1,npart
-       xp(1,i)=xp0(1,i)+dt*vp(1,i)
-       xp(2,i)=xp0(2,i)+dt*vp(2,i)
-       xp(3,i)=xp0(3,i)+dt*vp(3,i)
-       omp(1,i)=omp(1,i)+dt*rhsp(1,i)
-       omp(2,i)=omp(2,i)+dt*rhsp(2,i)
-       omp(3,i)=omp(3,i)+dt*rhsp(3,i)
-    end do
-
-  end subroutine advect
-
+    ! if( associated(cutoff_weights)) deallocate(cutoff_weights)
+  end subroutine free_particles
+
 end module Particles
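[Editor's note] push_particles above is a midpoint (RK2) integrator: half step with the current velocity, remap of velocity and rhs at the predicted positions, then a full step from the stored positions xp0 -- with the same rule applied to the vorticity strengths omp. A self-contained toy version for one particle in a solid-body rotation field (illustrative only, not part of the sources):

program rk2_demo
  implicit none
  integer, parameter :: mk = kind(1.0d0)
  real(mk) :: x(2), xh(2), x0(2), dt
  integer :: i, n
  x = (/ 1.0_mk, 0.0_mk /)
  dt = 0.05_mk
  n = nint(8.0_mk*atan(1.0_mk)/dt)   ! roughly one revolution of u = (-y, x)
  do i = 1, n
     x0 = x
     xh = x0 + 0.5_mk*dt*(/ -x0(2), x0(1) /)   ! predictor: half step
     x  = x0 + dt*(/ -xh(2), xh(1) /)          ! corrector: midpoint velocity
  end do
  print *, 'after ~one revolution:', x, ' radius drift:', sqrt(x(1)**2 + x(2)**2) - 1.0_mk
end program rk2_demo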
diff --git a/HySoP/src/main/Penalize.f90 b/HySoP/src/main/Penalize.f90
index 7191ac09551df4affd8709e220870b3d2c9acae6..839516870116063b173d31fee9bf5d7a4b060a0d 100755
--- a/HySoP/src/main/Penalize.f90
+++ b/HySoP/src/main/Penalize.f90
@@ -2,25 +2,35 @@ module penalisation

   use client_data, only: mk,dime
   use ppm_module_typedef, only : ppm_t_equi_mesh, ppm_t_topo
-
+  use mpi
   implicit none

   private

-  public :: penalisation_init, penalise_velocity
+  public :: penalisation_init, penalise_velocity, get_drag

   ! Penalisation kernel function
   real(mk), dimension(:,:,:,:), pointer :: chi => NULL()
+  real(mk), dimension(:,:,:,:), pointer :: chi_sphere => NULL()
+
+  ! Sphere radius and center position
+  real(mk), dimension(dime) :: sphere_pos
+  real(mk) :: sphere_radius
   !
   real(mk) :: lambda
+  ! Drag
+  real(mk) :: local_drag
+  ! Elementary volume
+  real(mk) :: dvol
+
contains

-  subroutine penalisation_init(topo, ghostsize, meshStep)
-    integer, dimension(:), pointer :: ghostsize
+  subroutine penalisation_init(topo, meshStep, boundary_layer_min, boundary_layer_max)
    type(ppm_t_topo), pointer :: topo
    real(mk), dimension(:), pointer :: meshStep
-
+    real(mk), dimension(:), pointer :: boundary_layer_max, boundary_layer_min
+
    !> Number of points in each dir (1st index) for each sub (2nd index)
    integer, dimension(:,:), pointer :: ndata =>NULL()
    !> List of subdomains for the current proc - Warning, it seems that from ppm, isublist is of size nbproc (total number of proc) but
@@ -32,22 +42,24 @@ contains
    integer::istat,i,j,k,isub,isubl

    ! Lower and upper bounds for fields
-    integer, dimension(dime) :: ldl, ldu
    integer, dimension(dime) :: vndata

-    ! z position
-    real(mk) :: zPos, zPos0
+    ! x,y,z position
+    real(mk) :: x,y,z,x0,y0,z0
    ! lower bound of the physical domain
    real(mk), dimension(:), pointer :: minPhys => NULL()
    ! mesh indexes
    integer, dimension(:,:), pointer :: istart => NULL()
-    ! virtual boundary layer position
-    real(mk) :: limitPos

-    limitPos = 1.0
+    real(mk) :: dist

    ! Set lambda value
-    lambda = 5.5D5
-
+    lambda = 6.D5
+
+    ! Set sphere position and radius
+    sphere_radius = 0.5
+    sphere_pos = (boundary_layer_min + boundary_layer_max)*0.5
+
    ndata => topo%mesh(1)%nnodes
    isublist => topo%isublist
    nsublist = topo%nsublist
@@ -56,10 +68,11 @@
    vndata(2) = maxval(ndata(2,isublist(1:nsublist)))
    vndata(3) = maxval(ndata(3,isublist(1:nsublist)))

-    allocate(chi(vndata(1),vndata(2),vndata(3),nsublist),stat = istat)
+    allocate(chi_sphere(vndata(1),vndata(2),vndata(3),nsublist),chi(vndata(1),vndata(2),vndata(3),nsublist),stat = istat)
    if(istat.ne.0) stop 'Chi function allocation error.'

    chi = 0.0
+    chi_sphere = 0.0

    istart => topo%mesh(1)%istart
    minPhys => topo%min_physd
@@ -67,20 +80,31 @@
    do isub=1,nsublist
       ! loop over mesh nodes
       isubl=isublist(isub)
-       zpos0 = minPhys(3) + meshStep(3)*(istart(3,isubl)-2)
+       x0 = topo%min_subd(1,isubl)
+       y0 = topo%min_subd(2,isubl)
+       z0 = topo%min_subd(3,isubl)
       do k=1,ndata(3,isubl)
-          zpos = zpos0 + k*meshStep(3)
+          z = z0 + (k-1)*meshStep(3)
          do j=1,ndata(2,isubl)
+             y = y0 + (j-1)*meshStep(2)
             do i=1,ndata(1,isubl)
-                if( (zpos>limitPos).or.(zpos<-limitPos)) then
-                   chi(i,j,k,isubl) = lambda
+                x = x0 + (i-1)*meshStep(1)
+                dist = (x-sphere_pos(1))**2 + (y-sphere_pos(2))**2 + (z-sphere_pos(3))**2 - sphere_radius**2
+                if(dist <=0.0) then
+                   chi_sphere(i,j,k,isub) = lambda
+                end if
+                if( (z> boundary_layer_max(3)).or.(z< boundary_layer_min(3))) then
+                   chi(i,j,k,isub) = lambda
                end if
             end do
          end do
       end do
    end do
-
+    chi = chi + chi_sphere
    nullify(isublist, ndata)
+
+    local_drag = 0.0
+    dvol = meshStep(1)*meshStep(2)*meshStep(3)
  end subroutine penalisation_init

  !>
@@ -98,6 +122,7 @@ contains
    integer :: i,j,k, isub, nsublist,isubl
    real(mk) :: coef

+    local_drag = 0
    nsublist = topo%nsublist
    ndata => topo%mesh(1)%nnodes

@@ -106,10 +131,11 @@ contains
       do k=1,ndata(3,isubl)
          do j=1,ndata(2,isubl)
             do i=1,ndata(1,isubl)
-                coef =1./(1.+ dt*chi(i,j,k,isubl))
-                vel(1,i,j,k,isubl)=vel(1,i,j,k,isubl)*coef
-                vel(2,i,j,k,isubl)=vel(2,i,j,k,isubl)*coef
-                vel(3,i,j,k,isubl)=vel(3,i,j,k,isubl)*coef
+                coef =1./(1.+ dt*chi(i,j,k,isub))
+                vel(1,i,j,k,isub)=vel(1,i,j,k,isub)*coef
+                vel(2,i,j,k,isub)=vel(2,i,j,k,isub)*coef
+                vel(3,i,j,k,isub)=vel(3,i,j,k,isub)*coef
+                local_drag = local_drag + chi_sphere(i,j,k,isub)*vel(1,i,j,k,isub)*dvol
             end do
          end do
       end do
@@ -118,5 +144,17 @@ contains
    nullify(ndata)

  end subroutine penalise_velocity
-
+
+  subroutine get_drag(drag)
+    real(mk), intent(out) :: drag
+    integer :: info
+    real(mk) :: coef, uinf
+
+    uinf = 1.0
+    coef = 2./(uinf**2*2.*sphere_radius)
+    local_drag = coef*local_drag
+    call MPI_AllReduce(local_drag, drag, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, info)
+
+  end subroutine get_drag
+
 end module Penalisation
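[Editor's note] penalise_velocity uses the implicit (backward-Euler) form of Brinkman penalisation, u <- u/(1 + dt*chi), which is stable for arbitrarily large chi; an explicit update u <- u*(1 - dt*chi) would diverge immediately at lambda = 6.D5. A scalar toy comparison (illustrative only, not part of the sources):

program penal_demo
  implicit none
  integer, parameter :: mk = kind(1.0d0)
  real(mk) :: u_imp, u_exp, dt, chi
  integer :: it
  dt = 0.05_mk
  chi = 6.0e5_mk            ! lambda inside the solid, as in penalisation_init
  u_imp = 1.0_mk
  u_exp = 1.0_mk
  do it = 1, 3
     u_imp = u_imp/(1.0_mk + dt*chi)   ! backward Euler: u decays to 0
     u_exp = u_exp*(1.0_mk - dt*chi)   ! forward Euler: |1 - dt*chi| >> 1, blows up
  end do
  print *, 'implicit:', u_imp, '  explicit:', u_exp
end program penal_demo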
diff --git a/HySoP/src/main/Solver.f90 b/HySoP/src/main/Solver.f90
index 08ad33fee54fd286c30a42c29a9361315bf7291c..232a8a2c3a68438c17a21a2c9b34b55b81141dd6 100755
--- a/HySoP/src/main/Solver.f90
+++ b/HySoP/src/main/Solver.f90
@@ -8,7 +8,7 @@ module Solver
   !use ppm_module_fdsolver_solve
   use ppm_module_mg_solv
   use ppm_module_poisson, only: ppm_poisson_init, ppm_poisson_plan, ppm_poisson_grn_pois_per, ppm_poisson_solve, &
-       ppm_poisson_drv_curl_fd2
+       ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4, ppm_poisson_drv_none
   use client_topology
   use ppm_module_typedef, only : ppm_param_mesh_coarsen
   use ppm_module_mesh_derive, only: ppm_mesh_derive
@@ -17,7 +17,7 @@ module Solver

   private

-  public :: init_poisson_solver, solve_poisson
+  public :: init_poisson_solver, solve_poisson, ppm_poisson_drv_curl_fd2, ppm_poisson_drv_curl_fd4, ppm_poisson_drv_none

   type(ppm_poisson_plan), pointer :: fftwplanForppm => NULL()

@@ -89,25 +89,28 @@ contains

   end subroutine solve_poisson_multigrid

-  subroutine init_fftw(fieldin,fieldout, topoid, meshid)
+  subroutine init_fftw(fieldin,fieldout, topoid, meshid, deriveValue)

    real(mk), dimension(:,:,:,:,:), pointer :: fieldin, fieldout
    integer, intent(in) :: topoid, meshid
+    integer, intent(in), optional :: deriveValue

    ! Parameters for ppm poisson routine
    integer :: info
    ! Flag to select built-in Green functions (...)
-    integer :: green , deriveValue
+    integer :: green

    green = ppm_poisson_grn_pois_per ! periodic boundaries
    info = -1
-    deriveValue = ppm_poisson_drv_curl_fd2
+
+    !deriveValue = ppm_poisson_drv_curl_fd2
    allocate(fftwplanForppm)
    ! Call ppm routine to initialize fftw plan.
-    call ppm_poisson_init(topoid, meshid, fftwplanForppm, fieldin, fieldout, green,info, derive=deriveValue)
+    call ppm_poisson_init(topoid, meshid, fftwplanForppm, fieldin, fieldout, green,info,derive=deriveValue)
+
    if(info.NE.0) stop 'PPM Poisson solver init failed.'

  end subroutine init_fftw

@@ -119,10 +122,9 @@ contains
    integer :: info

    ! Finite diff scheme used to compute curl
-    integer :: dtype
+    !integer :: dtype

    info = -1
-    dtype = ppm_poisson_drv_curl_fd2

    ! Solves laplacian(fieldout) = - fieldin
    call ppm_poisson_solve(topoid, meshid, fftwplanForppm,fieldin,fieldout,ghostsize,info)
@@ -137,4 +139,10 @@ contains

  end subroutine solve_poisson_fftw

+  subroutine finalize_poisson_fftw()
+
+  end subroutine finalize_poisson_fftw
+
 end module Solver
+
+
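[Editor's note] With the changes above, the optional derive flag selects how velocity is recovered from the solved stream function (none, or a 2nd/4th-order finite-difference curl). The wrapper below is a hypothetical sketch, not part of the sources; the three calls mirror the ones already made in main_client:

subroutine poisson_velocity_from_vorticity(vorticity, velocity, topo, domain_ghostsize)
  ! Hypothetical wrapper: solve the Poisson problem and return velocity = curl(psi).
  use Solver, only: init_poisson_solver, solve_poisson, ppm_poisson_drv_curl_fd4
  use client_topology, only: ppm_t_topo
  real(kind(1.d0)), dimension(:,:,:,:,:), pointer :: vorticity, velocity
  type(ppm_t_topo), pointer :: topo
  integer, dimension(:), pointer :: domain_ghostsize
  integer :: derive
  derive = ppm_poisson_drv_curl_fd4   ! 4th-order finite-difference curl
  call init_poisson_solver(vorticity, velocity, topo%ID, topo%mesh(1)%ID, derive)
  call solve_poisson(vorticity, velocity, topo%ID, topo%mesh(1)%ID, domain_ghostsize)
end subroutine poisson_velocity_from_vorticity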
diff --git a/HySoP/src/main/TestFunctions.f90 b/HySoP/src/main/TestFunctions.f90
new file mode 100755
index 0000000000000000000000000000000000000000..224dbff48a6304e0b1abc07bab626b9a7cf07ba9
--- /dev/null
+++ b/HySoP/src/main/TestFunctions.f90
@@ -0,0 +1,236 @@
+module testsFunctions
+
+  use client_data, only: mk
+  use client_topology, only: ppm_t_topo
+  implicit none
+
+  real(mk) :: xref
+  integer :: np
+
+contains
+
+  !> Computes the analytical values for stream function, velocity and vorticity such that
+  !>  * rhs_ex = -omega_ex = laplacian(psi_ex)
+  !>  * vel_ex = nabla X psi_ex
+  !>  * div(psi_ex) = div(omega_ex) = div(vel_ex) = 0.0
+  !>  * nabla X vel_ex = omega_ex
+  !> (see maple file)
+  subroutine poisson_analytic(topo, meshStep, rhs_ex, vel_ex, psi_ex)
+
+    ! the topology ...
+    type(ppm_t_topo), pointer :: topo
+    ! size of mesh step in each dir
+    real(mk), dimension(:), pointer :: meshStep
+    ! rhs function values on the grid (i.e. -omega_ex)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs_ex
+    ! velocity function values on the grid
+    real(mk), dimension(:,:,:,:,:), pointer :: vel_ex
+    ! Stream function values on the grid
+    real(mk), dimension(:,:,:,:,:), pointer :: psi_ex
+
+    real(mk) :: x,y,z,cx,cy,cz,c2x,c2y,c2z,sx,sy,sz,s2x,s2y,s2z,x0,y0,z0
+    integer :: i,j,k,isub,isubl
+    real(mk) :: pi
+
+    pi = 4.0*atan(1.0_mk)
+
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       ! Origin point on the local subdomain
+       x0 = topo%min_subd(1,isubl)
+       y0 = topo%min_subd(2,isubl)
+       z0 = topo%min_subd(3,isubl)
+
+       do k=1,topo%mesh(1)%nnodes(3,isubl)
+          z = z0 + (k-1)*meshStep(3)
+          cz=cos(pi*z)
+          c2z=cos(2.*pi*z)
+          sz=sin(pi*z)
+          s2z=sin(2.*pi*z)
+          do j=1,topo%mesh(1)%nnodes(2,isubl)
+             y = y0 + (j-1)*meshStep(2)
+             cy=cos(pi*y)
+             c2y=cos(2.*pi*y)
+             sy=sin(pi*y)
+             s2y=sin(2.*pi*y)
+             !psi_ex(3,:,j,:,:) = y
+
+             do i=1,topo%mesh(1)%nnodes(1,isubl)
+                x = x0 + (i-1)*meshStep(1)
+
+                cx=cos(pi*x)
+                c2x=cos(2.*pi*x)
+                sx=sin(pi*x)
+                s2x=sin(2.*pi*x)
+
+                rhs_ex(1,i,j,k,isub) = 8.*s2y*pi**2*s2z
+                rhs_ex(2,i,j,k,isub) = 8.*s2x*pi**2*s2z
+                rhs_ex(3,i,j,k,isub) = 8.*s2x*pi**2*s2y
+
+                vel_ex(1,i,j,k,isub) = 2.*s2x*pi*(c2y - c2z)
+                vel_ex(2,i,j,k,isub) = 2.*s2y*pi*(c2z - c2x)
+                vel_ex(3,i,j,k,isub) = 2.*s2z*pi*(c2x - c2y)
+
+                psi_ex(1,i,j,k,isub) = sy*sz
+                psi_ex(2,i,j,k,isub) = sx*sz
+                psi_ex(3,i,j,k,isub) = sx*sy
+
+             end do
+          end do
+       end do
+    end do
+  end subroutine poisson_analytic
+
+  subroutine init_vorticity(vorticity, topo, meshStep, boundary_layer_min, boundary_layer_max)
+
+    ! vorticity field
+    real(mk), dimension(:,:,:,:,:), pointer :: vorticity
+    ! the topology ...
+    type(ppm_t_topo), pointer :: topo
+    ! size of mesh step in each dir
+    real(mk), dimension(:), pointer :: meshStep
+    real(mk), dimension(:), pointer :: boundary_layer_max, boundary_layer_min
+    integer :: i,j,k,isub,isubl
+    real(mk) :: pi,x,y,z,x0,y0,z0,physicalDomainSize_Z
+
+    physicalDomainSize_Z = (boundary_layer_max(3)- boundary_layer_min(3))/2.
+    pi = 4.0*atan(1.0_mk)
+    vorticity = 0
+
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       ! Origin point on the local subdomain
+       x0 = topo%min_subd(1,isubl)
+       y0 = topo%min_subd(2,isubl)
+       z0 = topo%min_subd(3,isubl)
+
+       do k=1,topo%mesh(1)%nnodes(3,isubl)
+          z = z0 + (k-1)*meshStep(3)
+          do j=1,topo%mesh(1)%nnodes(2,isubl)
+             y = y0 + (j-1)*meshStep(2)
+             do i=1,topo%mesh(1)%nnodes(1,isubl)
+                x = x0 + (i-1)*meshStep(1)
+                if( (z < boundary_layer_max(3)).and.(z>boundary_layer_min(3))) then
+                   vorticity(2,i,j,k,isub) = -3.*z/(physicalDomainSize_Z)**2
+                endif
+             end do
+          end do
+       end do
+    end do
+  end subroutine init_vorticity
+
+  !> Computes the analytical rhs of the vorticity equation (stretch + diffusion terms)
+  !> together with the corresponding analytical vorticity and velocity fields
+  !> (see maple file)
+  subroutine rhs_analytic(topo, meshStep, rhs_ex, velocity, vorticity, nu)
+
+    ! the topology ...
+    type(ppm_t_topo), pointer :: topo
+    ! size of mesh step in each dir
+    real(mk), dimension(:), pointer :: meshStep
+    ! rhs function values on the grid (i.e. -omega_ex)
+    real(mk), dimension(:,:,:,:,:), pointer :: rhs_ex, velocity, vorticity
+    real(mk), intent(in) :: nu
+
+    real(mk) :: x,y,z,cx,cy,cz,c2x,c2y,c2z,sx,sy,sz,s2x,s2y,s2z,x0,y0,z0
+    integer :: i,j,k,isub,isubl
+    real(mk) :: pi
+
+    pi = 4.0*atan(1.0_mk)
+
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       ! Origin point on the local subdomain
+       x0 = topo%min_subd(1,isubl)
+       y0 = topo%min_subd(2,isubl)
+       z0 = topo%min_subd(3,isubl)
+
+       do k=1,topo%mesh(1)%nnodes(3,isubl)
+          z = z0 + (k-1)*meshStep(3)
+          cz=cos(pi*z)
+          c2z=cos(2.*pi*z)
+          sz=sin(pi*z)
+          s2z=sin(2.*pi*z)
+          do j=1,topo%mesh(1)%nnodes(2,isubl)
+             y = y0 + (j-1)*meshStep(2)
+             cy=cos(pi*y)
+             c2y=cos(2.*pi*y)
+             sy=sin(pi*y)
+             s2y=sin(2.*pi*y)
+             !psi_ex(3,:,j,:,:) = y
+
+             do i=1,topo%mesh(1)%nnodes(1,isubl)
+                x = x0 + (i-1)*meshStep(1)
+
+                cx=cos(pi*x)
+                c2x=cos(2.*pi*x)
+                sx=sin(pi*x)
+                s2x=sin(2.*pi*x)
+
+                rhs_ex(1,i,j,k,isub) = 32.*s2y*pi**4*s2z*(-2.*nu+c2x*c2y-c2x*c2z)
+                rhs_ex(2,i,j,k,isub) = -32.*s2x*pi**4*s2z*(2.*nu-c2y*c2z+c2x*c2y)
+                rhs_ex(3,i,j,k,isub) = 32.*s2x*pi**4*s2y*(-2.*nu+c2x*c2z-c2z*c2y)
+
+                vorticity(1,i,j,k,isub) = 8.*s2y*pi**2*s2z
+                vorticity(2,i,j,k,isub) = 8.*s2x*pi**2*s2z
+                vorticity(3,i,j,k,isub) = 8.*s2x*pi**2*s2y
+
+                velocity(1,i,j,k,isub) = 2.*s2x*pi*(c2y - c2z)
+                velocity(2,i,j,k,isub) = 2.*s2y*pi*(c2z - c2x)
+                velocity(3,i,j,k,isub) = 2.*s2z*pi*(c2x - c2y)
+
+             end do
+          end do
+       end do
+    end do
+  end subroutine rhs_analytic
+
+  subroutine test_particles(vorticity, velocity, topo, meshStep)
+
+    ! vorticity field
+    real(mk), dimension(:,:,:,:,:), pointer :: vorticity, velocity
+    ! the topology ...
+    type(ppm_t_topo), pointer :: topo
+    ! size of mesh step in each dir
+    real(mk), dimension(:), pointer :: meshStep
+    integer :: i,j,k,isub,isubl,l
+    real(mk) :: pi,x,y,z,x0,y0,z0
+
+    pi = 4.0*atan(1.0_mk)
+    vorticity = 0.
+    velocity = 0.
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       ! Origin point on the local subdomain
+       x0 = topo%min_subd(1,isubl)
+       y0 = topo%min_subd(2,isubl)
+       z0 = topo%min_subd(3,isubl)
+
+       do k=1,topo%mesh(1)%nnodes(3,isubl)
+          z = z0 + (k-1)*meshStep(3)
+          do j=1,topo%mesh(1)%nnodes(2,isubl)
+             y = y0 + (j-1)*meshStep(2)
+             do i=1,topo%mesh(1)%nnodes(1,isubl)
+                x = x0 + (i-1)*meshStep(1)
+                ! print *, 'ehehehe'
+                if( (x .eq.10*meshStep(1)).and.(y.eq.0).and.(z.eq.xref)) then
+                   do l=1,np
+                      vorticity(3,i,j,k+l-1,isub) = 0.1
+                      velocity(3,i,j,k+l-1,isub) = 2.0
+                   enddo
+                endif
+             end do
+          end do
+       end do
+    end do
+    xref = xref + meshStep(3)
+    !np = np +1
+    print *, xref
+  end subroutine test_particles
+
+end module testsFunctions
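[Editor's note] A consistency check on init_vorticity: for u = (u1(z), 0, 0) the curl reduces to omega2 = du1/dz, and omega2 = -3z/H**2 (H the boundary-layer half-height) integrates, with u1(+-H) = 0, to u1(z) = (3/2)(1 - (z/H)**2) -- plane Poiseuille flow with unit mean velocity, consistent with the unit flow rate targeted in fit_velocity. The sketch below (illustrative, not part of the sources) verifies the profile against the prescribed vorticity numerically:

program check_poiseuille
  implicit none
  integer, parameter :: mk = kind(1.0d0)
  real(mk) :: H, z, dz, u_plus, u_minus, omega_fd, omega_exact
  H = 1.0_mk/0.4844_mk      ! zFolke, the half-height used in Domain.f90
  dz = 1.0e-4_mk
  z = 0.3_mk*H              ! any interior point of the layer
  u_plus  = 1.5_mk*(1.0_mk - ((z+dz)/H)**2)
  u_minus = 1.5_mk*(1.0_mk - ((z-dz)/H)**2)
  omega_fd = (u_plus - u_minus)/(2.0_mk*dz)   ! omega2 = du1/dz
  omega_exact = -3.0_mk*z/H**2                ! the value set in init_vorticity
  print *, 'finite-difference:', omega_fd, '  analytic:', omega_exact
end program check_poiseuille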
diff --git a/HySoP/src/main/Topology.f90 b/HySoP/src/main/Topology.f90
index 8682d32eb777f4c7a2d6e3634ab60496a25acc27..c4eaf7272c4bd813ff2da07ee17fbfec91a2bdf0 100755
--- a/HySoP/src/main/Topology.f90
+++ b/HySoP/src/main/Topology.f90
@@ -4,7 +4,8 @@ module client_topology
   use ppm_module_typedef, only : ppm_t_topo
   use ppm_module_mktopo
   use ppm_module_data, only : ppm_param_assign_internal, ppm_param_decomp_cartesian, ppm_topo
-  use mpi
+!  use mpi
+
   use client_data, only: mk,dime

@@ -31,7 +32,7 @@ contains
    integer :: false_np
    integer assigning, decomposition
    integer, dimension(:,:), pointer :: istart => NULL(), ndata=>NULL()
-    integer :: info, meshid, topoid!,rank
+    integer :: info, meshid, topoid, rank

    info = -1
    topoid=0
@@ -47,10 +48,11 @@ contains
         istart, ndata, resolution, info)
    if(info.ne.0) stop 'Topology:init initialisation failed'

-    !call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
-
+!    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    ! print *, '[',rank,'], topo :', shape(ndata), '//', ndata
    ! Get the created topology ...
    topo=>ppm_topo(topoid)%t
+
    ! Notes Franck:
    ! nsublist is the number of subdomains for one processus (mpi proc not physical proc ...)
@@ -59,9 +61,7 @@ contains
    ! sub2proc[i] = global number of the processus on which subdomain i is attached
    !
!!$
-!!$    print *, '======================= [', rank,'] ', topo%nsublist
-!!$    print *, '======================= [', rank,'] ', shape(topo%isublist)
-!!$    print *, '======================= [', rank,'] ', topo%isublist
+    ! rank is not initialised here (use mpi is commented out above), so keep the debug print rank-free
+    print *, 'subs : ', topo%nsublist, '//', shape(topo%isublist), '//', topo%isublist
!!$    print *, '======================= [', rank,'] ', shape(topo%sub2proc)
!!$    print *, '======================= [', rank,'] ', topo%sub2proc
!!$
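[Editor's note] The next file builds the curl with the classical 4th-order centred first-derivative stencil, f'(x_i) ~ (-f(i+2) + 8*f(i+1) - 8*f(i-1) + f(i-2)) / (12*h), applied component-wise using the ghost layers. Isolated as a one-liner (hypothetical helper, not part of the sources):

pure function df4(fm2, fm1, fp1, fp2, h) result(d)
  ! 4th-order centred first derivative on a uniform grid of step h,
  ! the same stencil as curldf4 below
  real(kind(1.d0)), intent(in) :: fm2, fm1, fp1, fp2, h
  real(kind(1.d0)) :: d
  d = (-fp2 + 8.d0*fp1 - 8.d0*fm1 + fm2)/(12.d0*h)
end function df4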
diff --git a/HySoP/src/main/VectorCalc.f90 b/HySoP/src/main/VectorCalc.f90
index 1e4dd7a8120c3431a4e4f56bbfd9cd3c7b79c2ae..84b3a07567f60a90d83e1edc355a294ff2e05456 100755
--- a/HySoP/src/main/VectorCalc.f90
+++ b/HySoP/src/main/VectorCalc.f90
@@ -3,6 +3,7 @@ module vectorcalculus

   use client_data, only: mk,dime
   use client_topology, only: ppm_t_topo
+
   implicit none

contains
@@ -18,7 +19,7 @@ contains
    real(mk) :: facx, facy, facz
    integer :: isub, i,j,k, isubl

-
+
    facx = 1.0_mk/(12.0_mk*meshStep(1))
    facy = 1.0_mk/(12.0_mk*meshStep(2))
@@ -29,27 +30,26 @@ contains
    do isub=1,topo%nsublist
       isubl=topo%isublist(isub)
       do k=1,topo%mesh(1)%nnodes(3,isubl)
          do j=1,topo%mesh(1)%nnodes(2,isubl)
             do i=1,topo%mesh(1)%nnodes(1,isubl)
-                fieldout(1,i,j,k,isub) = facz*(&
+                fieldout(1,i,j,k,isub) = -facz*(&
                     -fieldin(2,i,j,k+2,isub)+8.0_mk*fieldin(2,i,j,k+1,isub)-8.0_mk*fieldin(2,i,j,k-1,isub)+fieldin(2,i,j,k-2,isub)&
-                     ) -facy* (&
+                     ) + facy*(&
                     -fieldin(3,i,j+2,k,isub)+8.0_mk*fieldin(3,i,j+1,k,isub)-8.0_mk*fieldin(3,i,j-1,k,isub)+fieldin(3,i,j-2,k,isub)&
                     )
-                fieldout(2,i,j,k,isub) = facx*(&
+                fieldout(2,i,j,k,isub) = -facx*(&
                     -fieldin(3,i+2,j,k,isub)+8.0_mk*fieldin(3,i+1,j,k,isub)-8.0_mk*fieldin(3,i-1,j,k,isub)+fieldin(3,i-2,j,k,isub)&
-                     ) -facz*( &
+                     ) + facz*( &
                     -fieldin(1,i,j,k+2,isub)+8.0_mk*fieldin(1,i,j,k+1,isub)-8.0_mk*fieldin(1,i,j,k-1,isub)+fieldin(1,i,j,k-2,isub))
-                fieldout(3,i,j,k,isub) = facy*(&
+                fieldout(3,i,j,k,isub) = -facy*(&
                     -fieldin(1,i,j+2,k,isub)+8.0_mk*fieldin(1,i,j+1,k,isub)-8.0_mk*fieldin(1,i,j-1,k,isub)+fieldin(1,i,j-2,k,isub)&
-                     ) -facx*(&
+                     ) +facx*(&
                     -fieldin(2,i+2,j,k,isub)+8.0_mk*fieldin(2,i+1,j,k,isub)-8.0_mk*fieldin(2,i-1,j,k,isub)+fieldin(2,i-2,j,k,isub))
             enddo
          enddo
       enddo
    enddo
-
  end subroutine curldf4

-  !> Computes strech and diffusion terms
+  !> Computes stretch and diffusion terms. This is a copy of Adrien's code.
  subroutine computeRHS(velocity, vorticity, rhs, topo, meshStep, nu)

    real(mk), dimension(:,:,:,:,:), pointer :: velocity
@@ -59,183 +59,232 @@ contains
    real(mk), dimension(:), pointer :: meshStep
    real(mk), intent(in) :: nu

-    integer :: i,j,k, ip1, im1, ip2, im2, jp1, jm1, jp2,jm2, kp1,km1,kp2,km2
+    integer :: i,j,k
    integer :: isub,isubl
    real(mk), dimension(3) :: tx, ty, tz
    real(mk) :: facx,facy,facz, facx2,facy2,facz2
    integer :: nx, ny, nz
-
-
-    facx=1.D0/(12.D0*meshStep(1))
-    facy=1.D0/(12.D0*meshStep(2))
-    facz=1.D0/(12.D0*meshStep(3))
-    facx2=1.D0/(12.D0**meshStep(1)**2)
-    facy2=1.D0/(12.D0**meshStep(2)**2)
-    facz2=1.D0/(12.D0**meshStep(3)**2)
-
+
+    facx=1._mk/(12._mk*meshStep(1))
+    facy=1._mk/(12._mk*meshStep(2))
+    facz=1._mk/(12._mk*meshStep(3))
+    facx2=1._mk/(12._mk*meshStep(1)**2)
+    facy2=1._mk/(12._mk*meshStep(2)**2)
+    facz2=1._mk/(12._mk*meshStep(3)**2)

    do isub=1,topo%nsublist
       isubl=topo%isublist(isub)
       nx = topo%mesh(1)%nnodes(1,isubl)
       ny = topo%mesh(1)%nnodes(2,isubl)
       nz = topo%mesh(1)%nnodes(3,isubl)
-
+
       do k=1,nz
          do j=1,ny
            do i= 1,nx
-
-                ip1=mod(i+1+nx-1,nx)+1
-                im1=mod(i-1+nx-1,nx)+1
-                ip2=mod(i+2+nx-1,nx)+1
-                im2=mod(i-2+nx-1,nx)+1
-
-                jp1=mod(j+1+ny-1,ny)+1
-                jm1=mod(j-1+ny-1,ny)+1
-                jp2=mod(j+2+ny-1,ny)+1
-                jm2=mod(j-2+ny-1,ny)+1
-
-                kp1=mod(k+1+nz-1,nz)+1
-                km1=mod(k-1+nz-1,nz)+1
-                kp2=mod(k+2+nz-1,nz)+1
-                km2=mod(k-2+nz-1,nz)+1
-
               !stretch
               !------
               tx(1)= &
-                     vorticity(1,im2,j,k,isub)*velocity(1,im2,j,k,isub) - 8.D0*&
-                     vorticity(1,im1,j,k,isub)*velocity(1,im1,j,k,isub) + 8.D0*&
-                     vorticity(1,ip1,j,k,isub)*velocity(1,ip1,j,k,isub) - &
-                     vorticity(1,ip2,j,k,isub)*velocity(1,ip2,j,k,isub)
+                     vorticity(1,i-2,j,k,isub)*velocity(1,i-2,j,k,isub) - 8._mk*&
+                     vorticity(1,i-1,j,k,isub)*velocity(1,i-1,j,k,isub) + 8._mk*&
+                     vorticity(1,i+1,j,k,isub)*velocity(1,i+1,j,k,isub) - &
+                     vorticity(1,i+2,j,k,isub)*velocity(1,i+2,j,k,isub)
               tx(2)= &
-                     vorticity(1,im2,j,k,isub)*velocity(2,im2,j,k,isub) - 8.D0*&
-                     vorticity(1,im1,j,k,isub)*velocity(2,im1,j,k,isub) + 8.D0*&
-                     vorticity(1,ip1,j,k,isub)*velocity(2,ip1,j,k,isub) - &
-                     vorticity(1,ip2,j,k,isub)*velocity(2,ip2,j,k,isub)
+                     vorticity(1,i-2,j,k,isub)*velocity(2,i-2,j,k,isub) - 8._mk*&
+                     vorticity(1,i-1,j,k,isub)*velocity(2,i-1,j,k,isub) + 8._mk*&
+                     vorticity(1,i+1,j,k,isub)*velocity(2,i+1,j,k,isub) - &
+                     vorticity(1,i+2,j,k,isub)*velocity(2,i+2,j,k,isub)
               tx(3)=&
-                     vorticity(1,im2,j,k,isub)*velocity(3,im2,j,k,isub) - 8.D0*&
-                     vorticity(1,im1,j,k,isub)*velocity(3,im1,j,k,isub) + 8.D0*&
-                     vorticity(1,ip1,j,k,isub)*velocity(3,ip1,j,k,isub) - &
-                     vorticity(1,ip2,j,k,isub)*velocity(3,ip2,j,k,isub)
+                     vorticity(1,i-2,j,k,isub)*velocity(3,i-2,j,k,isub) - 8._mk*&
+                     vorticity(1,i-1,j,k,isub)*velocity(3,i-1,j,k,isub) + 8._mk*&
+                     vorticity(1,i+1,j,k,isub)*velocity(3,i+1,j,k,isub) - &
+                     vorticity(1,i+2,j,k,isub)*velocity(3,i+2,j,k,isub)
               ty(1)=&
-                     vorticity(2,i,jm2,k,isub)*velocity(1,i,jm2,k,isub) - 8.D0*&
-                     vorticity(2,i,jm1,k,isub)*velocity(1,i,jm1,k,isub) + 8.D0*&
-                     vorticity(2,i,jp1,k,isub)*velocity(1,i,jp1,k,isub) - &
-                     vorticity(2,i,jp2,k,isub)*velocity(1,i,jp2,k,isub)
+                     vorticity(2,i,j-2,k,isub)*velocity(1,i,j-2,k,isub) - 8._mk*&
+                     vorticity(2,i,j-1,k,isub)*velocity(1,i,j-1,k,isub) + 8._mk*&
+                     vorticity(2,i,j+1,k,isub)*velocity(1,i,j+1,k,isub) - &
+                     vorticity(2,i,j+2,k,isub)*velocity(1,i,j+2,k,isub)
               ty(2)=&
-                     vorticity(2,i,jm2,k,isub)*velocity(2,i,jm2,k,isub) - 8.D0*&
-                     vorticity(2,i,jm1,k,isub)*velocity(2,i,jm1,k,isub) + 8.D0*&
-                     vorticity(2,i,jp1,k,isub)*velocity(2,i,jp1,k,isub) - &
-                     vorticity(2,i,jp2,k,isub)*velocity(2,i,jp2,k,isub)
+                     vorticity(2,i,j-2,k,isub)*velocity(2,i,j-2,k,isub) - 8._mk*&
+                     vorticity(2,i,j-1,k,isub)*velocity(2,i,j-1,k,isub) + 8._mk*&
+                     vorticity(2,i,j+1,k,isub)*velocity(2,i,j+1,k,isub) - &
+                     vorticity(2,i,j+2,k,isub)*velocity(2,i,j+2,k,isub)
               ty(3)=&
-                     vorticity(2,i,jm2,k,isub)*velocity(3,i,jm2,k,isub) - 8.D0*&
-                     vorticity(2,i,jm1,k,isub)*velocity(3,i,jm1,k,isub) + 8.D0*&
-                     vorticity(2,i,jp1,k,isub)*velocity(3,i,jp1,k,isub) - &
-                     vorticity(2,i,jp2,k,isub)*velocity(3,i,jp2,k,isub)
+                     vorticity(2,i,j-2,k,isub)*velocity(3,i,j-2,k,isub) - 8._mk*&
+                     vorticity(2,i,j-1,k,isub)*velocity(3,i,j-1,k,isub) + 8._mk*&
+                     vorticity(2,i,j+1,k,isub)*velocity(3,i,j+1,k,isub) - &
+                     vorticity(2,i,j+2,k,isub)*velocity(3,i,j+2,k,isub)
               tz(1)=&
-                     vorticity(3,i,j,km2,isub)*velocity(1,i,j,km2,isub) - 8.D0*&
-                     vorticity(3,i,j,km1,isub)*velocity(1,i,j,km1,isub) + 8.D0*&
-                     vorticity(3,i,j,kp1,isub)*velocity(1,i,j,kp1,isub) - &
-                     vorticity(3,i,j,kp2,isub)*velocity(1,i,j,kp2,isub)
+                     vorticity(3,i,j,k-2,isub)*velocity(1,i,j,k-2,isub) - 8._mk*&
+                     vorticity(3,i,j,k-1,isub)*velocity(1,i,j,k-1,isub) + 8._mk*&
+                     vorticity(3,i,j,k+1,isub)*velocity(1,i,j,k+1,isub) - &
+                     vorticity(3,i,j,k+2,isub)*velocity(1,i,j,k+2,isub)
               tz(2)=&
-                     vorticity(3,i,j,km2,isub)*velocity(2,i,j,km2,isub) - 8.D0*&
-                     vorticity(3,i,j,km1,isub)*velocity(2,i,j,km1,isub) + 8.D0*&
-                     vorticity(3,i,j,kp1,isub)*velocity(2,i,j,kp1,isub) - &
-                     vorticity(3,i,j,kp2,isub)*velocity(2,i,j,kp2,isub)
+                     vorticity(3,i,j,k-2,isub)*velocity(2,i,j,k-2,isub) - 8._mk*&
+                     vorticity(3,i,j,k-1,isub)*velocity(2,i,j,k-1,isub) + 8._mk*&
+                     vorticity(3,i,j,k+1,isub)*velocity(2,i,j,k+1,isub) - &
+                     vorticity(3,i,j,k+2,isub)*velocity(2,i,j,k+2,isub)
               tz(3)=&
-                     vorticity(3,i,j,km2,isub)*velocity(3,i,j,km2,isub) - 8.D0*&
-                     vorticity(3,i,j,km1,isub)*velocity(3,i,j,km1,isub) + 8.D0*&
-                     vorticity(3,i,j,kp1,isub)*velocity(3,i,j,kp1,isub) - &
-                     vorticity(3,i,j,kp2,isub)*velocity(3,i,j,kp2,isub)
+                     vorticity(3,i,j,k-2,isub)*velocity(3,i,j,k-2,isub) - 8._mk*&
+                     vorticity(3,i,j,k-1,isub)*velocity(3,i,j,k-1,isub) + 8._mk*&
+                     vorticity(3,i,j,k+1,isub)*velocity(3,i,j,k+1,isub) - &
+                     vorticity(3,i,j,k+2,isub)*velocity(3,i,j,k+2,isub)

               rhs(1,i,j,k,isub)=facx*tx(1)+facy*ty(1)+facz*tz(1)
               rhs(2,i,j,k,isub)=facx*tx(2)+facy*ty(2)+facz*tz(2)
               rhs(3,i,j,k,isub)=facx*tx(3)+facy*ty(3)+facz*tz(3)
-
               !diffusion
               !----------
               tx(1)= - &
-                     vorticity(1,ip2,j,k,isub) + 16.D0*&
-                     vorticity(1,ip1,j,k,isub) - 30.D0*&
-                     vorticity(1,i,j,k,isub) + 16.D0*&
-                     vorticity(1,im1,j,k,isub) - &
-                     vorticity(1,im2,j,k,isub)
+                     vorticity(1,i+2,j,k,isub) + 16._mk*&
+                     vorticity(1,i+1,j,k,isub) - 30._mk*&
+                     vorticity(1,i,j,k,isub) + 16._mk*&
+                     vorticity(1,i-1,j,k,isub) - &
+                     vorticity(1,i-2,j,k,isub)
               tx(2)= - &
-                     vorticity(2,ip2,j,k,isub) + 16.D0*&
-                     vorticity(2,ip1,j,k,isub) - 30.D0*&
-                     vorticity(2,i,j,k,isub) + 16.D0*&
-                     vorticity(2,im1,j,k,isub) - &
-                     vorticity(2,im2,j,k,isub)
+                     vorticity(2,i+2,j,k,isub) + 16._mk*&
+                     vorticity(2,i+1,j,k,isub) - 30._mk*&
+                     vorticity(2,i,j,k,isub) + 16._mk*&
+                     vorticity(2,i-1,j,k,isub) - &
+                     vorticity(2,i-2,j,k,isub)
               tx(3) = - &
-                     vorticity(3,ip2,j,k,isub) + 16.D0*&
-                     vorticity(3,ip1,j,k,isub) - 30.D0*&
-                     vorticity(3,i,j,k,isub) + 16.D0*&
-                     vorticity(3,im1,j,k,isub) - &
-                     vorticity(3,im2,j,k,isub)
+                     vorticity(3,i+2,j,k,isub) + 16._mk*&
+                     vorticity(3,i+1,j,k,isub) - 30._mk*&
+                     vorticity(3,i,j,k,isub) + 16._mk*&
+                     vorticity(3,i-1,j,k,isub) - &
+                     vorticity(3,i-2,j,k,isub)
               ty(1)= - &
-                     vorticity(1,i,jp2,k,isub) + 16.D0*&
-                     vorticity(1,i,jp1,k,isub) - 30.D0*&
-                     vorticity(1,i,j,k,isub) + 16.D0*&
-                     vorticity(1,i,jm1,k,isub) - &
-                     vorticity(1,i,jm2,k,isub)
+                     vorticity(1,i,j+2,k,isub) + 16._mk*&
+                     vorticity(1,i,j+1,k,isub) - 30._mk*&
+                     vorticity(1,i,j,k,isub) + 16._mk*&
+                     vorticity(1,i,j-1,k,isub) - &
+                     vorticity(1,i,j-2,k,isub)
               ty(2)= - &
-                     vorticity(2,i,jp2,k,isub) + 16.D0*&
-                     vorticity(2,i,jp1,k,isub) - 30.D0*&
-                     vorticity(2,i,j,k,isub) + 16.D0*&
-                     vorticity(2,i,jm1,k,isub) - &
-                     vorticity(2,i,jm2,k,isub)
+                     vorticity(2,i,j+2,k,isub) + 16._mk*&
+                     vorticity(2,i,j+1,k,isub) - 30._mk*&
+                     vorticity(2,i,j,k,isub) + 16._mk*&
+                     vorticity(2,i,j-1,k,isub) - &
+                     vorticity(2,i,j-2,k,isub)
               ty(3)= - &
-                     vorticity(3,i,jp2,k,isub) + 16.D0*&
-                     vorticity(3,i,jp1,k,isub) - 30.D0*&
-                     vorticity(3,i,j,k,isub) + 16.D0*&
-                     vorticity(3,i,jm1,k,isub) - &
-                     vorticity(3,i,jm2,k,isub)
+                     vorticity(3,i,j+2,k,isub) + 16._mk*&
+                     vorticity(3,i,j+1,k,isub) - 30._mk*&
+                     vorticity(3,i,j,k,isub) + 16._mk*&
+                     vorticity(3,i,j-1,k,isub) - &
+                     vorticity(3,i,j-2,k,isub)
               tz(1)= - &
-                     vorticity(1,i,j,kp2,isub) + 16.D0*&
-                     vorticity(1,i,j,kp1,isub) - 30.D0*&
-                     vorticity(1,i,j,k,isub) + 16.D0*&
-                     vorticity(1,i,j,km1,isub) - &
-                     vorticity(1,i,j,km2,isub)
+                     vorticity(1,i,j,k+2,isub) + 16._mk*&
+                     vorticity(1,i,j,k+1,isub) - 30._mk*&
+                     vorticity(1,i,j,k,isub) + 16._mk*&
+                     vorticity(1,i,j,k-1,isub) - &
+                     vorticity(1,i,j,k-2,isub)
               tz(2)= - &
-                     vorticity(2,i,j,kp2,isub) + 16.D0*&
-                     vorticity(2,i,j,kp1,isub) - 30.D0*&
-                     vorticity(2,i,j,k,isub) + 16.D0*&
-                     vorticity(2,i,j,km1,isub) - &
-                     vorticity(2,i,j,km2,isub)
+                     vorticity(2,i,j,k+2,isub) + 16._mk*&
+                     vorticity(2,i,j,k+1,isub) - 30._mk*&
+                     vorticity(2,i,j,k,isub) + 16._mk*&
+                     vorticity(2,i,j,k-1,isub) - &
+                     vorticity(2,i,j,k-2,isub)
               tz(3)=- &
-                     vorticity(3,i,j,kp2,isub) + 16.D0*&
-                     vorticity(3,i,j,kp1,isub) - 30.D0*&
-                     vorticity(3,i,j,k,isub) + 16.D0*&
-                     vorticity(3,i,j,km1,isub) - &
-                     vorticity(3,i,j,km2,isub)
+                     vorticity(3,i,j,k+2,isub) + 16._mk*&
+                     vorticity(3,i,j,k+1,isub) - 30._mk*&
+                     vorticity(3,i,j,k,isub) + 16._mk*&
+                     vorticity(3,i,j,k-1,isub) - &
+                     vorticity(3,i,j,k-2,isub)

               rhs(1,i,j,k,isub)=rhs(1,i,j,k,isub)+nu*(facx2*tx(1)+facy2*ty(1)+facz2*tz(1))
               rhs(2,i,j,k,isub)=rhs(2,i,j,k,isub)+nu*(facx2*tx(2)+facy2*ty(2)+facz2*tz(2))
               rhs(3,i,j,k,isub)=rhs(3,i,j,k,isub)+nu*(facx2*tx(3)+facy2*ty(3)+facz2*tz(3))
-
+
            end do
         end do
      end do
   end do
 end subroutine computeRHS

+  subroutine norm1(norm, field, meshStep, topo)
+    real(mk), dimension(:,:), pointer :: norm
+    real(mk), dimension(:,:,:,:,:), pointer :: field
+    real(mk), dimension(:), pointer :: meshStep
+    type(ppm_t_topo), pointer :: topo
+
+    real(mk) :: h3
+    integer :: isub, isubl
+    integer, dimension(:,:), pointer :: nnodes => NULL()
+
+    nnodes => topo%mesh(1)%nnodes
+    h3 = meshStep(1)*meshStep(2)*meshStep(3)
+
+    do isub=1,topo%nsublist
+
+       isubl=topo%isublist(isub)
+       norm(1,isub) = h3*sum(abs(field(1,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub)))
+       norm(2,isub) = h3*sum(abs(field(2,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub)))
+       norm(3,isub) = h3*sum(abs(field(3,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub)))
+
+    end do
+    nullify(nnodes)
+  end subroutine norm1
+
+  subroutine norm2(norm, field, meshStep, topo)
+    real(mk), dimension(:,:), pointer :: norm
+    real(mk), dimension(:,:,:,:,:), pointer :: field
+    real(mk), dimension(:), pointer :: meshStep
+    type(ppm_t_topo), pointer :: topo
+    real(mk) :: h3
+    integer :: isub, isubl
+    integer, dimension(:,:), pointer :: nnodes=>NULL()
+    nnodes => topo%mesh(1)%nnodes
+
+    h3 = meshStep(1)*meshStep(2)*meshStep(3)
+
+    do isub=1,topo%nsublist
+
+       isubl=topo%isublist(isub)
+       norm(1,isub) = sqrt(h3*sum(abs(field(1,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub))**2))
+       norm(2,isub) = sqrt(h3*sum(abs(field(2,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub))**2))
+       norm(3,isub) = sqrt(h3*sum(abs(field(3,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub))**2))
+    end do
+    nullify(nnodes)
+  end subroutine norm2
+
+  subroutine normInf(norm, field, meshStep, topo)
+    real(mk), dimension(:,:), pointer :: norm
+    real(mk), dimension(:,:,:,:,:), pointer :: field
+    real(mk), dimension(:), pointer :: meshStep
+    type(ppm_t_topo), pointer :: topo
+
+    real(mk) :: h3
+    integer :: isub, isubl
+    integer, dimension(:,:), pointer :: nnodes=>NULL()
+
+    nnodes => topo%mesh(1)%nnodes
+    h3 = real(meshStep(1)*meshStep(2)*meshStep(3),mk)
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       norm(1,isub) = maxval(abs(field(1,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub)))
+       norm(2,isub) = maxval(abs(field(2,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub)))
+       norm(3,isub) = maxval(abs(field(3,1:nnodes(1,isubl),1:nnodes(2,isubl),1:nnodes(3,isubl),isub)))
+    end do
+
+    nullify(nnodes)
+  end subroutine normInf
+
+end module vectorcalculus
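[Editor's note] norm1, norm2 and normInf above return one value per local subdomain. Assembling a global, cross-rank norm requires reducing in the right space: sum the L1 contributions, sum the squares before taking the square root for L2, and take a max for L-infinity. A hedged sketch (the helper name and calling convention are assumptions; the MPI calls are standard):

subroutine assemble_global_norms(norm2_local, norminf_local, norm2_glob, norminf_glob)
  ! Hypothetical helper: combine per-rank L2 and Linf values into global norms.
  use mpi
  real(kind(1.d0)), intent(in)  :: norm2_local(3), norminf_local(3)
  real(kind(1.d0)), intent(out) :: norm2_glob(3), norminf_glob(3)
  real(kind(1.d0)) :: sq(3)
  integer :: info
  sq = norm2_local**2    ! reduce squared sums, not the norms themselves
  call MPI_AllReduce(MPI_IN_PLACE, sq, 3, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, info)
  norm2_glob = sqrt(sq)
  norminf_glob = norminf_local
  call MPI_AllReduce(MPI_IN_PLACE, norminf_glob, 3, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_WORLD, info)
end subroutine assemble_global_norms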
+
+    !call res_vtk_vit(trim(name_vit))
+    !call res_vtk_omega(trim(name_omg))
+
+    !call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    ! The MPI_COMM_RANK call above is disabled ("use mpi" is commented out),
+    ! so set rank explicitly to keep the file names well defined.
+    rank = 0
+
+    do isub=1,topo%nsublist
+       isubl=topo%isublist(isub)
+       nx = topo%mesh(1)%nnodes(1,isubl)
+       ny = topo%mesh(1)%nnodes(2,isubl)
+       nz = topo%mesh(1)%nnodes(3,isubl)
+       nbpoints = nx*ny*nz
+       write(buffer1, *) isubl
+       write(buffer2, *) rank
+       buffer1 = adjustl(buffer1)
+       buffer2 = adjustl(buffer2)
+       localname = trim(filename)//"_velocity_it"//trim(buffer)//"_"//trim(buffer2)//"_"//trim(buffer1)//".vtk"
+       localname2 = trim(filename)//"_vorticity_it"//trim(buffer)//"_"//trim(buffer2)//"_"//trim(buffer1)//".vtk"
+       open(unit=11,file=localname,form="formatted")
+       open(unit=12,file=localname2,form="formatted")
+       write(11,'(A26)')"# vtk DataFile Version 3.0"
+       write(11,'(A7)')"vecteur"
+       write(11,'(A5)')"ASCII"
+       write(11,'(A25)')"DATASET STRUCTURED_POINTS"
+       write(11,'(A10,3(i4,1x))')"DIMENSIONS",nx,ny,nz
+       write(11,'(a6,3(f10.5))')"ORIGIN", topo%min_subd(:,isubl)
+       write(11,'(A7,3(f10.5))')"SPACING",spacing
+       write(11,'(A10,i10)') "POINT_DATA",nbpoints
+       write(11,'(A21)') "VECTORS velo FLOAT"
+       write(12,'(A26)')"# vtk DataFile Version 3.0"
+       write(12,'(A7)')"vecteur"
+       write(12,'(A5)')"ASCII"
+       write(12,'(A25)')"DATASET STRUCTURED_POINTS"
+       write(12,'(A10,3(i4,1x))')"DIMENSIONS",nx,ny,nz
+       write(12,'(a6,3(f10.5))')"ORIGIN", topo%min_subd(:,isubl)
+       write(12,'(A7,3(f10.5))')"SPACING",spacing
+       write(12,'(A10,i10)') "POINT_DATA",nbpoints
+       write(12,'(A21)') "VECTORS vort FLOAT"
+       do k=1,nz
+          do j=1,ny
+             do i=1,nx
+                write(11,'(3(f20.9))') real(velocity(1,i,j,k,isub)),real(velocity(2,i,j,k,isub)),real(velocity(3,i,j,k,isub))
+                write(12,'(3(f20.9))') real(vorticity(1,i,j,k,isub)),real(vorticity(2,i,j,k,isub)),real(vorticity(3,i,j,k,isub))
+             end do
+          end do
+       end do
+       close(11)
+       close(12)
+    end do
+
+  end subroutine printToVTK
+
+end module io_vtk
diff --git a/HySoP/src/main/main.cxx b/HySoP/src/main/main.cxx
index ee277ddc157f407015d9962bb5dc15490f80ff6f..2793dbf7ff0d4f3df3e986dc81ea075ef1cb03c1 100644
--- a/HySoP/src/main/main.cxx
+++ b/HySoP/src/main/main.cxx
@@ -2,13 +2,15 @@
 */
 #include<iostream>
-#include"ppm_wrapper.hpp"
+//#include"ppm_wrapper.hpp"
 #include <string>
 #include <cstring>
 #include <Grid.hpp>
 #include<ParmesDef.hpp>
 #include<Domain.hpp>
+#ifdef USE_MPI
 #include<mpi.h>
+#endif
 #include "WrapC.hpp"

 using namespace std ;
@@ -17,80 +19,83 @@ using Parmes::Def::real_t;
 extern "C" void createTopoG(int*, int*, int*, double*, double*, int*, double*);
 extern "C" void plouhmans();
 extern "C" void NavierStokes3D();
+extern "C" void testMain();

 void testPlouhmans()
 {
+#ifdef USE_MPI
   MPI::Init();
   assert(MPI::Is_initialized());
-
+#endif
   NavierStokes3D();
-
+  //testMain();
+#ifdef USE_MPI
   MPI::Finalize();
-
+#endif
 }
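+// A minimal sketch (an assumption, not part of this patch) of the same
+// guard written against the plain C MPI API, since the MPI::Init() /
+// MPI::Finalize() C++ bindings used above were deprecated in MPI-2.2 and
+// removed in MPI-3.0:
+//
+//   #ifdef USE_MPI
+//     MPI_Init(NULL, NULL);   // or pass argc/argv through from main()
+//   #endif
+//     NavierStokes3D();
+//   #ifdef USE_MPI
+//     MPI_Finalize();
+//   #endif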
-void test0()
-{
-  double t0;
-  int info;
+//void test0()
+// {
+//   double t0;
+//   int info;

-  // ===== Physical domain definition =====
-  // Problem dimension
-  int pbDim = 3;
-  // dimensions of the domain
-  Parmes::Def::vector3D dimsD = { { 1.0, 3.1, 4.3} };
-  // "Lowest" point
-  Parmes::Def::vector3D startPoint = { { 0., 1., 2.} };
-  // The domain
-  Parmes::Model::Domain<3> domain(dimsD, startPoint);
+//   // ===== Physical domain definition =====
+//   // Problem dimension
+//   int pbDim = 3;
+//   // dimensions of the domain
+//   Parmes::Def::vector3D dimsD = { { 1.0, 3.1, 4.3} };
+//   // "Lowest" point
+//   Parmes::Def::vector3D startPoint = { { 0., 1., 2.} };
+//   // The domain
+//   Parmes::Model::Domain<3> domain(dimsD, startPoint);

-  // ===== Grid definition =====
-  // Number of points in each dir ...
-  boost::array<size_t, 3> nbSteps = { { 3, 4, 5} };
-  Parmes::Discr::Grid<3> grid(domain, nbSteps);
+//   // ===== Grid definition =====
+//   // Number of points in each dir ...
+//   boost::array<size_t, 3> nbSteps = { { 3, 4, 5} };
+//   Parmes::Discr::Grid<3> grid(domain, nbSteps);

-  std::cout << grid << std::endl;
+//   std::cout << grid << std::endl;

-  std::string msg = "Main Programm in c++";
+//   std::string msg = "Main Programm in c++";

-  MPI::Init();
-  assert(MPI::Is_initialized());
+//   MPI::Init();
+//   assert(MPI::Is_initialized());

-  MPI::Intracomm Comm = MPI::COMM_WORLD;
+//   MPI::Intracomm Comm = MPI::COMM_WORLD;

-  // ==== Initialize ppm ====
-  PPM::wrapper::init(pbDim, 8, -15, Comm, 2, &info, 99, 98,97);
-  PPM::wrapper::substart(msg, &t0, &info);
+//   // ==== Initialize ppm ====
+//   PPM::wrapper::init(pbDim, 8, -15, Comm, 2, &info, 99, 98,97);
+//   PPM::wrapper::substart(msg, &t0, &info);

-  Parmes::Def::vector3D minPhys = {{0.0,0.0,0.0}}, maxPhys ={{ 3.14, 3.14, 6.28}};
-  Parmes::Def::vector3D minSub, maxSub;
-  boost::array<int,6> bc = {{ 1, 1 ,1 ,1 ,1,1}};
-  boost::array<int,3> nx ={{ 65, 65, 129}};
-  Parmes::Def::real_t ghostsize = 1.0;
-  int topoid = -1, decomp, dim = 3;
-  int meshid = -1;
-  // Time step
-  real_t dt = 0.96;
-  real_t finalT = 1000;
-  real_t nu = 0.001;
+//   Parmes::Def::vector3D minPhys = {{0.0,0.0,0.0}}, maxPhys ={{ 3.14, 3.14, 6.28}};
+//   Parmes::Def::vector3D minSub, maxSub;
+//   boost::array<int,6> bc = {{ 1, 1 ,1 ,1 ,1,1}};
+//   boost::array<int,3> nx ={{ 65, 65, 129}};
+//   Parmes::Def::real_t ghostsize = 1.0;
+//   int topoid = -1, decomp, dim = 3;
+//   int meshid = -1;
+//   // Time step
+//   real_t dt = 0.96;
+//   real_t finalT = 1000;
+//   real_t nu = 0.001;

-  Parmes::Def::real_t * costPerProc;
-  createTopoG(&dim, &topoid, &decomp,&minPhys[0], &maxPhys[0], &bc[0], &ghostsize);
+//   Parmes::Def::real_t * costPerProc;
+//   createTopoG(&dim, &topoid, &decomp,&minPhys[0], &maxPhys[0], &bc[0], &ghostsize);

-  cout << topoid << endl;
+//   cout << topoid << endl;

-  PPM::wrapper::finalize(info);
+//   PPM::wrapper::finalize(info);

-  // ==== Finalize everything ====
-  MPI::Finalize();
-  PPM::wrapper::substop(msg, &t0, &info);
+//   // ==== Finalize everything ====
+//   MPI::Finalize();
+//   PPM::wrapper::substop(msg, &t0, &info);

-}
+// }

 int main(int argc, char* argv[])
 {