diff --git a/HySoP/INSTALL b/HySoP/INSTALL
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..ede41e12ce552dcc4a2874624ed3faedb6650006 100644
--- a/HySoP/INSTALL
+++ b/HySoP/INSTALL
@@ -0,0 +1,12 @@
+
+1- Create a "build" directory anywhere and move into it.
+
+2- cmake path-to-sources-of-Parmes -DPPMCore_DIR=/home/perignon/install/ppmcore/share/CMake/ -DPPMNumerics_DIR=/home/perignon/install/ppmnumerics/share/CMake/
+   (adjust the two *_DIR paths to your own ppmcore/ppmnumerics installation)
+
+3- make -j 8
+
+4- mpirun -np 8 ./parmesRun
+
+
+
diff --git a/HySoP/src/main/Domain.f90 b/HySoP/src/main/Domain.f90
index 7f6d864c3a61c379e658dee2471d0a451e315760..2487885d4b4bd97d924210b1ffbcb7bbed55497e 100755
--- a/HySoP/src/main/Domain.f90
+++ b/HySoP/src/main/Domain.f90
@@ -40,7 +40,7 @@ contains
     domain_maxCoords(2) = zFolke
     ! 0.3: boundary layer
     domain_minCoords(3) = -zFolke - 0.3
-    domain_maxCoords(3) = zFolke + 0.3
+    domain_maxCoords(3) = zFolke + 0.3
 
     ! Boundary conditions and ghosts
     allocate(domain_bc(2*dime), domain_ghostsize(dime), stat = istat)
@@ -56,7 +56,7 @@ contains
     allocate(grid_resolution(dime),grid_step(dime),stat=istat)
     if(istat.ne.0) stop 'grid_resolution, allocation failed'
 
-    grid_resolution = 65
+    grid_resolution = 129
     grid_step = (domain_maxCoords - domain_minCoords)/(real(grid_resolution,mk)-1.)
     boundary_layer_min = 0
     boundary_layer_max = 0
diff --git a/HySoP/src/main/FieldsComputation.f90 b/HySoP/src/main/FieldsComputation.f90
index 70ce7f45c7de17eb6022d88b0b1585887c8d3066..a4234bb7d44d20edb738a379761fbc3dcdb7eeaa 100755
--- a/HySoP/src/main/FieldsComputation.f90
+++ b/HySoP/src/main/FieldsComputation.f90
@@ -9,7 +9,7 @@ module FieldsComputation
 
   private
 
-  public :: fit_velocity, computeFlowRate
+  public :: fit_velocity, perturb
 
 contains
 
@@ -55,8 +55,22 @@ contains
     print *, computed_flowrate, '//',surf
 
     vel(1,:,:,:,1) = vel(1,:,:,:,1) + constant
-
+
   end subroutine fit_velocity
+
+  !> Impose a uniform, time-sinusoidal y-velocity (called for 3 < t < 4
+  !> from NavierStokes3D to trigger the wake instability).
+  subroutine perturb(vel, time)
+    ! velocity field, intent(inout)
+    real(mk), dimension(:,:,:,:,:), pointer :: vel
+    ! current time
+    real(mk), intent(in) :: time
+
+    real(mk) :: pi
+
+    pi = 4.0_mk*atan(1.0_mk)
+
+    vel(2,:,:,:,1) = sin(pi*(time - 3.0_mk))
+
+  end subroutine perturb
 
   subroutine computeFlowRate(vel, stepSize, topo, flowRate)
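Note on the new perturb routine above: over the activation window 3 < t < 4 used in NavierStokes3D.f90 below, sin(pi*(t-3)) traces a half sine arch, zero at both ends of the window and 1 at t = 3.5, so the forcing switches on and off smoothly. A minimal standalone check (hypothetical driver, not part of the sources; kind(1.0d0) stands in for client_data's mk, assumed double precision):

    program check_perturb
      implicit none
      integer, parameter :: mk = kind(1.0d0)  ! stand-in for client_data's mk
      real(mk) :: t, pi
      integer :: i
      pi = 4.0_mk*atan(1.0_mk)
      ! sample the perturbation amplitude over the activation window (3,4)
      do i = 0, 10
         t = 3.0_mk + 0.1_mk*real(i,mk)
         print '(a,f5.2,a,f8.5)', 't = ', t, '   amplitude = ', sin(pi*(t - 3.0_mk))
      end do
    end program check_perturb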
diff --git a/HySoP/src/main/NavierStokes3D.f90 b/HySoP/src/main/NavierStokes3D.f90
index dc9f723e7ea241c54c201982968a81892d45bda9..2c872d60f4943a013940c75313587f20eddfe60e 100755
--- a/HySoP/src/main/NavierStokes3D.f90
+++ b/HySoP/src/main/NavierStokes3D.f90
@@ -20,7 +20,7 @@ module ppmNavierStokes3D
 
   use penalisation, only : penalisation_init, penalise_velocity, get_drag
 
-  use FieldsComputation, only : fit_velocity,computeFlowRate
+  use FieldsComputation, only : fit_velocity,perturb
 
   use vectorcalculus, only: curldf4, computeRHS, norm1, norm2, normInf
 
@@ -55,7 +55,7 @@ contains
     ! debug mode
     integer :: debug
     ! error status
-    integer :: info
+    integer :: info,rank,sizeZ
 
     !======================
     ! Init ppm
     !======================
@@ -82,6 +82,7 @@ contains
     ! Fields allocation
     !======================
     call init_fields(domain_ghostsize, topo)
+
     !======================
     ! Init Physics
     !======================
@@ -94,12 +95,30 @@ contains
     velocity = 0.0_mk
     call init_vorticity(vorticity,topo,grid_step,boundary_layer_min,boundary_layer_max)
 
-    call penalisation_init(topo,grid_step,boundary_layer_min,boundary_layer_max)
+    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+!!$    vorticity = 0.0_mk
+!!$    if(rank==0) then
+!!$       sizeZ = size(vorticity,4)
+!!$       print *, 'shape : ', shape(vorticity), sizeZ
+!!$       vorticity(2,4,5,5,:) = 12.0
+!!$    else
+!!$       vorticity(2,4,5,1,:) = 12.0
+!!$    end if
+!!$    call MPI_BARRIER(MPI_COMM_WORLD,info)
 
-    !======================
-    ! Init Particles
-    !======================
+    ! Update the vorticity ghost layers before the first Poisson solve
+    call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
+    call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity, 3, info)
+    call ppm_map_field_send(info)
+    call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
+
+    !============================================
+    ! Init penalization : set chi functions
+    !============================================
+    call penalisation_init(topo,grid_step,boundary_layer_min,boundary_layer_max)
+
+    !============================================
+    ! Build a first set of particles
+    !============================================
     call init_particles(vorticity,topo%ID,topo%mesh(1)%ID,velocity)
 
     print *, "end of parmes:ppm:init_client"
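The four-call ghost update (ghost_get/push/send/pop) now appears twice in this file: for the vorticity at init time above, and for the velocity inside the time loop below. A small wrapper would keep the sequence in one place; a minimal sketch reusing the same ppm_map_field_* calls (update_ghosts is a hypothetical name; mk and the ppm mapping modules are assumed visible at module scope):

    subroutine update_ghosts(field, topoid, meshid, ghostsize, info)
      ! Refresh the ghost layers of a 3-component field on the given topology/mesh
      real(mk), dimension(:,:,:,:,:), pointer :: field
      integer, intent(in) :: topoid, meshid
      integer, dimension(:), pointer :: ghostsize
      integer, intent(out) :: info
      call ppm_map_field_ghost_get(topoid, meshid, ghostsize, info)
      call ppm_map_field_push(topoid, meshid, field, 3, info)
      call ppm_map_field_send(info)
      call ppm_map_field_pop(topoid, meshid, field, 3, ghostsize, info)
    end subroutine update_ghosts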
@@ -120,12 +139,14 @@ contains
     Re = 133.4
     nu = 1./Re
     initial_time = 0.0
-    final_time = 10.0
-    time_step = 0.05
+    final_time = 60.0
+    time_step = 0.04
     current_time = initial_time
     iter = 1
-    max_iter= 10
-!    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+    max_iter= 10000
+
+
+    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
 !    call MPI_COMM_SIZE(MPI_COMM_WORLD,nbProcs,info)
     ! Initialisation stuff
     call init_client()
@@ -136,17 +157,9 @@ contains
     ! Init multigrid works in ppm (or seems to ...) but solve fails.
     !call init_multigrid(topo%ID, topo%mesh(1)%ID, domain_ghostsize, domain_bc)
 
-    call MPI_BARRIER(MPI_COMM_WORLD,info)
-
     ! Analytical solutions
     !call rhs_analytic(topo, grid_step, stream_function,vel_ex,vorticity,nu)
 
-    ! Init ghosts ...
-    call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
-    call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,vorticity, 3, info)
-    call ppm_map_field_send(info)
-    call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,vorticity,3,domain_ghostsize,info)
-
     derive = ppm_poisson_drv_curl_fd4
     call init_poisson_solver(vorticity,velocity,topo%ID, topo%mesh(1)%ID,derive)
 
@@ -155,13 +168,15 @@ contains
     print *, '[',rank,'] space/time steps ', grid_step, time_step
     print *, '[',rank,'] mesh ', shape(topo%mesh(1)%nnodes), topo%mesh(1)%nnodes
 
-    allocate(drag(max_iter))
-
-    open(10,file='drag')
-
+    !if(rank==0) then
+    allocate(drag(max_iter))
+    open(10,file='drag')
+    !end if
+    call MPI_BARRIER(MPI_COMM_WORLD,info)
+    print *, 'start iterations ...'
     ! Time loop
     do while(current_time < final_time .and. iter <= max_iter)
-       print *, '================ Start iter ',iter,' ================'
+!       print *, '================ Start iter ',iter,' ================'
 
        ! Compute velocity from vorticity
        ! Two steps:
        !  - solve the Poisson equation for the stream function and get the velocity from it
        !    (done inside the ppm routine, so we never handle the stream function itself)
        !  - update the velocity to fit the required boundary values
        call solve_poisson(vorticity, velocity, topo%ID, topo%mesh(1)%ID,domain_ghostsize)
-       call MPI_BARRIER(MPI_COMM_WORLD,info)
-       !call fit_velocity(velocity, grid_step,lengths,topo,boundary_layer_min,boundary_layer_max)
+       call fit_velocity(velocity, grid_step,lengths,topo,boundary_layer_min,boundary_layer_max)
+       ! Transverse perturbation, active for 3 < t < 4 only, to trigger the wake instability
+       if( current_time>3.0 .and. current_time<4.0) then
+          call perturb(velocity, current_time)
+       end if
 
        ! Penalize velocity
        call penalise_velocity(velocity,topo,time_step)
 
+       ! Refresh the velocity ghost layers after penalization
        call ppm_map_field_ghost_get(topo%ID,topo%mesh(1)%ID,domain_ghostsize,info)
        call ppm_map_field_push(topo%ID,topo%mesh(1)%ID,velocity, 3, info)
        call ppm_map_field_send(info)
        call ppm_map_field_pop(topo%ID,topo%mesh(1)%ID,velocity,3,domain_ghostsize,info)
-
-       call get_drag(drag(iter))
-       !print *, 'drag...', drag(iter,rank)
-       write(10,'(e14.5,e14.5)') current_time, drag(iter)
-!!$    ! Compute the new "penalized" vorticity
+       call get_drag(drag(iter))
+       ! Only rank 0 writes the drag file
+       if(rank == 0) then
+          write(10,'(e14.5,e14.5)') current_time, drag(iter)
+       end if
+       ! Compute the new "penalized" vorticity
        call curlDF4(velocity, vorticity,topo, grid_step)
-
+
        ! Computation of the rhs of the vorticity equation (on the grid) ...
        call computeRHS(velocity, vorticity, rhs, topo, grid_step, nu)
 
        call MPI_BARRIER(MPI_COMM_WORLD,info)
@@ -206,12 +223,13 @@ contains
 
        ! Remesh
        call remesh(topo%ID, topo%mesh(1)%ID,domain_ghostsize,vorticity)
-
-!       call printToVTK("post",iter, current_time,velocity,vorticity,topo,grid_step)
-
+       ! Dump fields to VTK every 10 iterations only
+       if(mod(iter, 10).eq.0) then
+          call printToVTK("post",iter, current_time,velocity,vorticity,topo,grid_step)
+       end if
        current_time = current_time + time_step
        !current_time = final_time
        iter = iter+1
-       print *, 'end of iteration ', iter-1
+       !print *, 'end of iteration ', iter-1
        call MPI_BARRIER(MPI_COMM_WORLD,info)
     enddo
 
@@ -219,7 +237,6 @@ contains
     close(10)
     call ppm_finalize(info)
     print *, 'end ppm simulation'
-    deallocate(drag)
 
   end subroutine main_client
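With final_time = 60.0 and time_step = 0.04 the loop runs 1500 iterations, while drag is allocated with max_iter = 10000 entries on every rank. Sizing the buffer from the time horizon would avoid most of the over-allocation; a minimal sketch reusing the variables declared in main_client (only nsteps is new):

    integer :: nsteps
    ! number of steps actually reachable before final_time
    nsteps = ceiling((final_time - initial_time)/time_step)
    allocate(drag(min(nsteps, max_iter)))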
diff --git a/HySoP/src/main/Particles.f90 b/HySoP/src/main/Particles.f90
index 4e3199e75f26a7675543d2d33f4d29f81a82c805..b9f9377f5989d63d9ebe03e39a342d4eca4bc401 100755
--- a/HySoP/src/main/Particles.f90
+++ b/HySoP/src/main/Particles.f90
@@ -2,9 +2,9 @@ module Particles
 
   use ppm_module_rmsh, only : ppm_rmsh_create_part,ppm_interp_m2p, ppm_interp_p2m !, ppm_rmsh_remesh
-
+  use ppm_module_impose_part_bc
   use client_data, only: mk, dime
-!  use mpi
+  use mpi
   use ppm_module_data, only : ppm_param_rmsh_kernel_mp4
   use ppm_module_map_part
   use ppm_module_util_dbg
@@ -47,7 +47,7 @@ contains
     ! topo and mesh ids
     integer, intent(in) :: topoid
     integer, intent(in) :: meshid
-    integer :: info, newNpart
+    integer :: info, newNpart,rank,i
     ! velocity on grid
     real(mk), dimension(:,:,:,:,:), pointer :: vel
@@ -62,31 +62,66 @@ contains
     npart = 0
     last_npart = npart
     first_call = .TRUE.
-
+    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+
     ! --------> The following call will allocate memory for xp, omp and velo_p, right?
     ! --------> And deallocation will be handled by ppm also?
     call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,.TRUE.,&
          field_wp=vel,wp=velo_p,lda2=dime)
+    print *, '[',rank,'] initialisation with ', npart,' particles'
     !> --------> Other variables carried by particles but not allocated during ppm_rmsh_create call
     ! --------> is there a better way to do this, with map_push or other ppm routines?
     allocate(rhsp(dime,npart),xp0(dime,npart))
-
-    ! --------> Is the following (map_part_global) required? I would say yes but I'm not sure ...
-    newNpart = 0
-    call ppm_map_part_global(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
-    call ppm_map_part_push(rhsp,dime,npart,info) ! rhs
-    call ppm_map_part_push(omp,dime,npart,info) ! vorticity
-    call ppm_map_part_push(xp0,dime,npart,info)
-    call ppm_map_part_send(npart,newNpart,info) ! send
-    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
-    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
-    npart = newNpart
+    !call ppm_dbg_print_d(topoid,0.0_mk,0,3,info,xp,npart)
+
+!    print *, '[',rank,'] shapes ...', shape(xp),'//', shape(omp), '//', shape(velo_p),'//',shape(rhsp),'//', shape(xp0)
+!!$    ! --------> Is the following (map_part_global) required? I would say no but I'm not sure ...
+!!$    newNpart = 0
+!!$    call ppm_impose_part_bc(topoid,xp,npart,info)
+!!$    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+!!$    call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
+!!$    call ppm_map_part_push(rhsp,dime,npart,info) ! rhs
+!!$    call ppm_map_part_push(omp,dime,npart,info) ! vorticity
+!!$    call ppm_map_part_push(xp0,dime,npart,info)
+!!$    call ppm_map_part_send(npart,newNpart,info) ! send
+!!$    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+!!$
+!!$    npart = newNpart
+
+    ! print *, '[',rank,'] post init with ', npart,' particles'
+    ! print *, '[',rank,'] post init xp/omp ', xp(3,:),velo_p(:,:),omp
+!!$    if(rank==1) then
+!!$       do i =1,size(xp,2)
+!!$          xp(3,i) = xp(3,i) - 1.0_mk
+!!$       end do
+!!$    else
+!!$       xp(3,:) = xp(3,:) +1.5
+!!$    end if
+!!$    !call ppm_impose_part_bc(topoid,xp,npart,info)
+!!$    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
+!!$    call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
+!!$    !call ppm_map_part_push(rhsp,dime,npart,info) ! rhs
+!!$    !call ppm_map_part_push(omp,dime,npart,info) ! vorticity
+!!$    !call ppm_map_part_push(xp0,dime,npart,info)
+!!$    call ppm_map_part_send(npart,newNpart,info) ! send
+!!$    !call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
+!!$    !call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+!!$    !call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+!!$    !call ppm_impose_part_bc(topoid,xp,npart,info)
+!!$    npart = newNpart
+!!$    print *, '[',rank,'] post 2 init with ', npart,' particles'
+!!$    print *, '[',rank,'] post 2 init xp/omp ', xp(3,:)!,velo_p(:,:)
+!!$
+!!$
 
  end subroutine init_particles
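On the mapping sequences in this module: the ppm_map_part push/pop buffers behave like a stack, which is what the reordered pop calls in this patch restore — properties are popped in the exact reverse of the push order, with the positions popped last. The pattern, spelled out with the arrays used here:

    newNpart = 0
    call ppm_map_part_partial(topoid,xp,npart,info)        ! map positions
    call ppm_map_part_push(velo_p,dime,npart,info)         ! push 1
    call ppm_map_part_push(rhsp,dime,npart,info)           ! push 2
    call ppm_map_part_push(omp,dime,npart,info)            ! push 3
    call ppm_map_part_push(xp0,dime,npart,info)            ! push 4
    call ppm_map_part_send(npart,newNpart,info)
    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)    ! pop 4 first
    call ppm_map_part_pop(omp,dime,npart,newNpart,info)    ! pop 3
    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)   ! pop 2
    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info) ! pop 1
    call ppm_map_part_pop(xp,dime,npart,newNpart,info)     ! positions last
    npart = newNpart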
 
  !> Create particles distribution
@@ -98,7 +133,7 @@ contains
     ! topo and mesh ids
     integer, intent(in) :: topoid
     integer, intent(in) :: meshid
-    integer :: info, newNpart
+    integer :: info, newNpart,rank
     integer, dimension(:), pointer :: ghostsize
     ! velocity on grid
     real(mk), dimension(:,:,:,:,:), pointer :: vel
@@ -107,27 +142,33 @@ contains
 
     info = 0
-    !call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
     ! Call ppm func to create the particles distribution
     last_npart = npart
     call ppm_rmsh_create_part(topoid,meshid,xp,npart,omp,dime,field_on_grid,cutoff,info,resetpos,&
          field_wp=vel,wp=velo_p,lda2=dime)
     !if(associated(xp)) print *, "xp shape:", rank, " ", shape(xp), npart
-
-    newNpart = 0
-    call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
-    call ppm_map_part_send(npart,newNpart,info) ! send
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
-    npart = newNpart
-
-    ! --------> rhsp and xp0 size was computed before rmsh_create and part_pop => must be updated
-    ! --------> deallocate/allocate may be done by some ppm routine?
-    if (last_npart.lt.npart) then
-       deallocate(rhsp,xp0) ! Todo : use ppm_alloc stuff
+    ! rhsp and xp0 were sized for the previous particle set => resize if it has grown
+    if(last_npart.lt.npart) then
+       deallocate(rhsp,xp0)
        allocate(rhsp(dime,npart),xp0(dime,npart))
     end if
+!    newNpart = 0
+!!$    call ppm_impose_part_bc(topoid,xp,npart,info)
+!!$    call ppm_map_part_global(topoid,xp,npart,info) ! positions
+!!$    call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
+!!$    call ppm_map_part_push(rhsp,dime,npart,info) ! rhs
+!!$    call ppm_map_part_push(omp,dime,npart,info) ! vorticity
+!!$    call ppm_map_part_push(xp0,dime,npart,info)
+!!$    call ppm_map_part_send(npart,newNpart,info) ! send
+!!$    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+!!$    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+
+    !print *, 'UPDATE ', npart, newNpart, shape(omp) , '\\', shape(velo_p)
+
+    ! npart = newNpart
 
     ! --------> Ok, from here all vars carried by particles must have the following shape : dime,npart
@@ -136,7 +177,7 @@ contains
     !> --------> mesh to particles for rhs of vorticity eq. This routine does not resize rhsp, it must be of the right size before the call?
     call ppm_interp_m2p(topoid, meshid, xp, npart, rhsp, dime, kernel, ghostsize, rhs,info)
 
-    print *, 'End of update particles'
+!    print *, 'End of update particles'
 
  end subroutine update_particles
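The grow-only reallocation in update_particles has a twin in init_particles; both could live in one helper. A sketch, assuming rhsp and xp0 remain module-level pointers (ensure_part_arrays is a hypothetical name):

    subroutine ensure_part_arrays(n_old, n_new)
      ! Reallocate the particle-carried work arrays when the particle count grows
      integer, intent(in) :: n_old, n_new
      if (n_old < n_new) then
         if (associated(rhsp)) deallocate(rhsp)
         if (associated(xp0)) deallocate(xp0)
         allocate(rhsp(dime,n_new), xp0(dime,n_new))
      end if
    end subroutine ensure_part_arrays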
@@ -156,8 +197,8 @@ contains
     real(mk), dimension(:,:,:,:,:), pointer :: rhs
     !> local loop indices
-    integer :: i, newNpart
-    ! call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
+    integer :: i, newNpart,rank
+    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, info)
 
     do i=1,npart
        xp0(1,i) = xp(1,i)
@@ -170,7 +211,9 @@ contains
     end do
 
     newNpart = 0
-
+
+    ! Particle positions have changed ... they must be remapped onto the subdomains
+    ! call ppm_impose_part_bc(topoid,xp,npart,info) ! Useless according to the doc, required according to Omar ...
     call ppm_map_part_partial(topoid,xp,npart,info) ! positions
     call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
     call ppm_map_part_push(rhsp,dime,npart,info) ! rhs
@@ -179,10 +222,17 @@ contains
     call ppm_map_part_send(npart,newNpart,info) ! send
     call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
     call ppm_map_part_pop(omp,dime,npart,newNpart,info)
-    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
-    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
     call ppm_map_part_pop(xp,dime,npart,newNpart,info)
-
+    ! print *, 'PUSH 0 ', npart, newNpart, shape(omp) , '\\', shape(velo_p)
+
+    ! Update sizes ...
+!    if (newNpart.lt.npart) then
+    !   deallocate(rhsp,xp0) ! Todo : use ppm_alloc stuff
+    !   allocate(rhsp(dime,newNpart),xp0(dime,newNpart))
+    !  end if
+    npart = newNpart
 
     ! --------> No need to call any realloc or resize process for xp0, rhsp ... that was done during map_part_pop, right?
@@ -203,11 +253,24 @@ contains
     end do
 
     newNpart = 0
+    !call ppm_impose_part_bc(topoid,xp,npart,info)
     call ppm_map_part_partial(topoid,xp,npart,info) ! positions
-    call ppm_map_part_push(omp,dime,npart,info) ! strengths
+    call ppm_map_part_push(velo_p,dime,npart,info) ! velocity
+    call ppm_map_part_push(rhsp,dime,npart,info) ! rhs
+    call ppm_map_part_push(omp,dime,npart,info) ! vorticity
+    call ppm_map_part_push(xp0,dime,npart,info)
     call ppm_map_part_send(npart,newNpart,info) ! send
-    call ppm_map_part_pop(omp,dime,npart,newNpart,info) ! strengths
-    call ppm_map_part_pop(xp,dime,npart,newNpart,info) ! positions
+    call ppm_map_part_pop(xp0,dime,npart,newNpart,info)
+    call ppm_map_part_pop(omp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(rhsp,dime,npart,newNpart,info)
+    call ppm_map_part_pop(velo_p,dime,npart,newNpart,info)
+    call ppm_map_part_pop(xp,dime,npart,newNpart,info)
+!    print *, 'PUSH 1 ', npart, newNpart, shape(omp) , '\\', shape(velo_p)
+!    if (newNpart.lt.npart) then
+!       deallocate(rhsp,xp0) ! Todo : use ppm_alloc stuff
+!       allocate(rhsp(dime,newNpart),xp0(dime,newNpart))
+!    end if
+    npart = newNpart
 
 end subroutine push_particles
diff --git a/HySoP/src/main/Penalize.f90 b/HySoP/src/main/Penalize.f90
index 839516870116063b173d31fee9bf5d7a4b060a0d..7543abd80df6bfb00d36a8b4b4a58befbeab7ec4 100755
--- a/HySoP/src/main/Penalize.f90
+++ b/HySoP/src/main/Penalize.f90
@@ -94,13 +94,14 @@ contains
                 chi_sphere(i,j,k,isub) = lambda
              end if
              if( (z> boundary_layer_max(3)).or.(z< boundary_layer_min(3))) then
-                chi(i,j,k,isub) = lambda
+                chi(i,j,k,isub) = lambda
              end if
+             chi(i,j,k,isub) = chi(i,j,k,isub) + chi_sphere(i,j,k,isub)
           end do
        end do
     end do
 end do
-    chi = chi + chi_sphere
+
     nullify(isublist, ndata)
 
     local_drag = 0.0
@@ -149,11 +150,12 @@ contains
 subroutine get_drag(drag)
     real(mk), intent(out) :: drag
     integer :: info
     real(mk) :: coef,uinf
+!    call MPI_BARRIER(MPI_COMM_WORLD,info)
     uinf = 1.0
     coef = 2./(uinf**2*2.*sphere_radius)
     local_drag = coef*local_drag
-    call MPI_AllReduce(local_drag, drag, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD,info)
+    call MPI_AllReduce(local_drag, drag, 1, MPI_DOUBLE_PRECISION, MPI_SUM,MPI_COMM_WORLD,info)
 
 end subroutine get_drag
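A portability note on get_drag: the MPI_AllReduce above assumes real(mk) is interchangeable with MPI_DOUBLE_PRECISION. If mk ever changes, the matching MPI datatype can be derived from the kind instead of being hard-coded; a hedged sketch using the MPI-2 query routine:

    integer :: mpi_real_mk, ierr
    ! ask MPI for the datatype matching real(mk)'s precision and range
    call MPI_Type_create_f90_real(precision(1.0_mk), range(1.0_mk), mpi_real_mk, ierr)
    call MPI_AllReduce(local_drag, drag, 1, mpi_real_mk, MPI_SUM, MPI_COMM_WORLD, ierr)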
diff --git a/HySoP/src/main/Topology.f90 b/HySoP/src/main/Topology.f90
index c4eaf7272c4bd813ff2da07ee17fbfec91a2bdf0..c9026f6a09e65dfaaa7f6993629a6639684ee109 100755
--- a/HySoP/src/main/Topology.f90
+++ b/HySoP/src/main/Topology.f90
@@ -4,7 +4,7 @@ module client_topology
 
   use ppm_module_typedef, only : ppm_t_topo
   use ppm_module_mktopo
   use ppm_module_data, only : ppm_param_assign_internal, ppm_param_decomp_cartesian, ppm_topo
-!  use mpi
+  use mpi
 
   use client_data, only: mk,dime
 
@@ -48,7 +48,7 @@ contains
          istart, ndata, resolution, info)
 
     if(info.ne.0) stop 'Topology:init initialisation failed'
-!    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
 !    print *, '[',rank,'], topo :', shape(ndata), '//', ndata
     ! Get the created topology ...
     topo=>ppm_topo(topoid)%t
@@ -61,7 +61,7 @@ contains
     ! sub2proc[i] = global number of the process to which subdomain i is assigned
     !
 !!$
-    print *, '[', rank,'], subs : ', topo%nsublist, '//', shape(topo%isublist),'//', topo%isublist
+    print *, '[', rank,'], subs : ', topo%nsublist, '//', shape(topo%isublist),'//', topo%isublist(1)
 !!$    print *, '======================= [', rank,'] ', shape(topo%sub2proc)
 !!$    print *, '======================= [', rank,'] ', topo%sub2proc
 !!$
diff --git a/HySoP/src/main/io_vtk.f90 b/HySoP/src/main/io_vtk.f90
index 926943d8bbc3942dcd8a6107c547a7ee9250767c..78435c51120b710738e9fd5cc8f8011c26c14ac0 100755
--- a/HySoP/src/main/io_vtk.f90
+++ b/HySoP/src/main/io_vtk.f90
@@ -3,11 +3,9 @@ module io_vtk
 
   use client_data, only:mk
   use client_topology, only: ppm_t_topo
-  !use mpi
+  use mpi
 
   implicit none
-
-
 contains
 
   !> print velocity and vorticity to vtk file
@@ -30,7 +28,7 @@ contains
     integer :: isub,isubl, rank
     ! local resolution
     integer :: nx,ny,nz
-!    integer :: info
+    integer :: info
 
     !local filename (depends on subproc number)
     character(len=30) :: localname,buffer1, buffer2,localname2
@@ -45,7 +43,7 @@ contains
     !call res_vtk_vit(trim(name_vit))
     !call res_vtk_omega(trim(name_omg))
 
-    !call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
+    call MPI_COMM_RANK(MPI_COMM_WORLD,rank,info)
 
     do isub=1,topo%nsublist
        isubl=topo%isublist(isub)
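printToVTK apparently writes one file per subdomain (the "local filename depends on subproc number" comment above). A minimal sketch of the naming step inside the isub loop, using the buffers declared in the routine (the 'name' prefix variable is a hypothetical stand-in for the actual argument):

    ! one file per (rank, subdomain) pair so concurrent writes never collide
    write(buffer1,'(i0)') rank
    write(buffer2,'(i0)') isubl
    localname = trim(name)//'_r'//trim(buffer1)//'_s'//trim(buffer2)//'.vtk'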